aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorJiri Kosina <jkosina@suse.cz>2011-02-15 04:24:31 -0500
committerJiri Kosina <jkosina@suse.cz>2011-02-15 04:24:31 -0500
commit0a9d59a2461477bd9ed143c01af9df3f8f00fa81 (patch)
treedf997d1cfb0786427a0df1fbd6f0640fa4248cf4 /drivers
parenta23ce6da9677d245aa0aadc99f4197030350ab54 (diff)
parent795abaf1e4e188c4171e3cd3dbb11a9fcacaf505 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/acpi/Kconfig20
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/ac.c3
-rw-r--r--drivers/acpi/acpi_ipmi.c525
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconfig.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h23
-rw-r--r--drivers/acpi/acpica/acglobal.h11
-rw-r--r--drivers/acpi/acpica/achware.h4
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h15
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h16
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h10
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c64
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c14
-rw-r--r--drivers/acpi/acpica/evgpe.c269
-rw-r--r--drivers/acpi/acpica/evgpeblk.c35
-rw-r--r--drivers/acpi/acpica/evgpeinit.c27
-rw-r--r--drivers/acpi/acpica/evgpeutil.c41
-rw-r--r--drivers/acpi/acpica/evmisc.c94
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c6
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c79
-rw-r--r--drivers/acpi/acpica/evxfevnt.c602
-rw-r--r--drivers/acpi/acpica/evxfgpe.c669
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c10
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c4
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c34
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c8
-rw-r--r--drivers/acpi/acpica/nsalloc.c15
-rw-r--r--drivers/acpi/acpica/nsdump.c17
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c4
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c7
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c4
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psparse.c27
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c9
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c2
-rw-r--r--drivers/acpi/acpica/rscreate.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c2
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c5
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c3
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c2
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/apei/apei-internal.h2
-rw-r--r--drivers/acpi/apei/cper.c311
-rw-r--r--drivers/acpi/apei/ghes.c431
-rw-r--r--drivers/acpi/apei/hest.c26
-rw-r--r--drivers/acpi/battery.c15
-rw-r--r--drivers/acpi/bus.c153
-rw-r--r--drivers/acpi/button.c9
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/ec.c5
-rw-r--r--drivers/acpi/fan.c27
-rw-r--r--drivers/acpi/glue.c5
-rw-r--r--drivers/acpi/internal.h13
-rw-r--r--drivers/acpi/numa.c8
-rw-r--r--drivers/acpi/nvs.c145
-rw-r--r--drivers/acpi/osl.c23
-rw-r--r--drivers/acpi/pci_root.c35
-rw-r--r--drivers/acpi/power.c128
-rw-r--r--drivers/acpi/proc.c41
-rw-r--r--drivers/acpi/processor_core.c4
-rw-r--r--drivers/acpi/processor_driver.c80
-rw-r--r--drivers/acpi/processor_idle.c28
-rw-r--r--drivers/acpi/processor_throttling.c190
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/scan.c70
-rw-r--r--drivers/acpi/sleep.c15
-rw-r--r--drivers/acpi/sysfs.c19
-rw-r--r--drivers/acpi/thermal.c5
-rw-r--r--drivers/acpi/video.c104
-rw-r--r--drivers/acpi/video_detect.c57
-rw-r--r--drivers/acpi/wakeup.c22
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c3
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/libata-scsi.c24
-rw-r--r--drivers/ata/pata_hpt366.c6
-rw-r--r--drivers/ata/pata_hpt37x.c112
-rw-r--r--drivers/ata/pata_hpt3x2n.c12
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/atm/idt77105.c2
-rw-r--r--drivers/base/Kconfig2
-rw-r--r--drivers/base/node.c21
-rw-r--r--drivers/base/power/runtime.c9
-rw-r--r--drivers/block/Makefile2
-rw-r--r--drivers/block/aoe/Makefile2
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/block/loop.c3
-rw-r--r--drivers/block/nbd.c3
-rw-r--r--drivers/bluetooth/ath3k.c75
-rw-r--r--drivers/cdrom/cdrom.c3
-rw-r--r--drivers/char/Kconfig12
-rw-r--r--drivers/char/Makefile12
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/agp/amd-k7-agp.c19
-rw-r--r--drivers/char/agp/intel-agp.c31
-rw-r--r--drivers/char/agp/intel-gtt.c21
-rw-r--r--drivers/char/bfin_jtag_comm.c8
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c27
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c35
-rw-r--r--drivers/char/tpm/tpm.c28
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm_tis.c10
-rw-r--r--drivers/char/virtio_console.c20
-rw-r--r--drivers/clocksource/acpi_pm.c6
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpuidle/cpuidle.c92
-rw-r--r--drivers/dma/Kconfig9
-rw-r--r--drivers/dma/amba-pl08x.c1168
-rw-r--r--drivers/dma/at_hdmac.c19
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/intel_mid_dma.c33
-rw-r--r--drivers/dma/iop-adma.c4
-rw-r--r--drivers/dma/pch_dma.c19
-rw-r--r--drivers/dma/ste_dma40.c191
-rw-r--r--drivers/dma/ste_dma40_ll.c246
-rw-r--r--drivers/dma/ste_dma40_ll.h36
-rw-r--r--drivers/edac/amd64_edac.c28
-rw-r--r--drivers/firewire/Kconfig6
-rw-r--r--drivers/firewire/core-card.c11
-rw-r--r--drivers/firewire/net.c9
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/gpio/cs5535-gpio.c93
-rw-r--r--drivers/gpio/langwell_gpio.c9
-rw-r--r--drivers/gpio/pca953x.c28
-rw-r--r--drivers/gpio/timbgpio.c6
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/drm_crtc.c20
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c33
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c45
-rw-r--r--drivers/gpu/drm/drm_irq.c7
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c35
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c18
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c27
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c47
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h6
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c17
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c59
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c3
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c82
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h4
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c46
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c182
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c57
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c74
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c43
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c23
-rw-r--r--drivers/gpu/drm/radeon/r300.c18
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c34
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c73
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c132
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h23
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c15
-rw-r--r--drivers/gpu/drm/radeon/rs600.c16
-rw-r--r--drivers/gpu/drm/radeon/rv515.c10
-rw-r--r--drivers/gpu/drm/radeon/rv770.c28
-rw-r--r--drivers/gpu/stub/Kconfig3
-rw-r--r--drivers/gpu/vga/Kconfig2
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/Kconfig64
-rw-r--r--drivers/hid/usbhid/Kconfig2
-rw-r--r--drivers/hwmon/Kconfig4
-rw-r--r--drivers/hwmon/applesmc.c1
-rw-r--r--drivers/hwmon/asus_atk0110.c23
-rw-r--r--drivers/hwmon/emc1403.c2
-rw-r--r--drivers/hwmon/lis3lv02d.c2
-rw-r--r--drivers/hwmon/lm63.c59
-rw-r--r--drivers/hwmon/lm93.c21
-rw-r--r--drivers/i2c/busses/scx200_acb.c200
-rw-r--r--drivers/i2c/i2c-core.c90
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/idle/intel_idle.c65
-rw-r--r--drivers/infiniband/core/cache.c4
-rw-r--r--drivers/infiniband/core/device.c11
-rw-r--r--drivers/infiniband/core/sa_query.c4
-rw-r--r--drivers/infiniband/core/ucma.c22
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c5
-rw-r--r--drivers/infiniband/hw/amso1100/c2_vq.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c4
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c11
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c12
-rw-r--r--drivers/infiniband/hw/mthca/Kconfig2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.c35
-rw-r--r--drivers/infiniband/hw/nes/nes.h4
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c95
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h10
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c78
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c37
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c44
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c33
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c19
-rw-r--r--drivers/input/Kconfig6
-rw-r--r--drivers/input/keyboard/Kconfig14
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/gpio_keys.c6
-rw-r--r--drivers/input/keyboard/tegra-kbc.c727
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c5
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c6
-rw-r--r--drivers/input/mouse/Kconfig10
-rw-r--r--drivers/input/mouse/synaptics.c32
-rw-r--r--drivers/input/serio/Kconfig6
-rw-r--r--drivers/input/serio/ct82c710.c8
-rw-r--r--drivers/input/serio/serport.c24
-rw-r--r--drivers/input/sparse-keymap.c1
-rw-r--r--drivers/input/tablet/wacom_wac.c27
-rw-r--r--drivers/input/touchscreen/Kconfig30
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c39
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c5
-rw-r--r--drivers/isdn/hysdn/hysdn_defs.h2
-rw-r--r--drivers/isdn/hysdn/hysdn_init.c26
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c3
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c3
-rw-r--r--drivers/isdn/icn/icn.c3
-rw-r--r--drivers/leds/leds-pwm.c1
-rw-r--r--drivers/leds/ledtrig-gpio.c15
-rw-r--r--drivers/lguest/page_tables.c2
-rw-r--r--drivers/lguest/x86/core.c4
-rw-r--r--drivers/macintosh/therm_pm72.c4
-rw-r--r--drivers/md/Kconfig24
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bitmap.c12
-rw-r--r--drivers/md/dm-crypt.c618
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-ioctl.c111
-rw-r--r--drivers/md/dm-kcopyd.c57
-rw-r--r--drivers/md/dm-log-userspace-base.c139
-rw-r--r--drivers/md/dm-log-userspace-transfer.c1
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c67
-rw-r--r--drivers/md/dm-raid.c697
-rw-r--r--drivers/md/dm-raid1.c19
-rw-r--r--drivers/md/dm-snap-persistent.c4
-rw-r--r--drivers/md/dm-snap.c62
-rw-r--r--drivers/md/dm-stripe.c27
-rw-r--r--drivers/md/dm-table.c20
-rw-r--r--drivers/md/dm.c23
-rw-r--r--drivers/md/md.c245
-rw-r--r--drivers/md/md.h15
-rw-r--r--drivers/md/raid0.c40
-rw-r--r--drivers/md/raid1.c33
-rw-r--r--drivers/md/raid10.c23
-rw-r--r--drivers/md/raid5.c62
-rw-r--r--drivers/media/common/saa7146_core.c2
-rw-r--r--drivers/media/common/saa7146_fops.c8
-rw-r--r--drivers/media/common/saa7146_vbi.c2
-rw-r--r--drivers/media/common/saa7146_video.c20
-rw-r--r--drivers/media/common/tuners/Kconfig2
-rw-r--r--drivers/media/common/tuners/tda8290.c130
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c6
-rw-r--r--drivers/media/dvb/firewire/firedtv-rc.c9
-rw-r--r--drivers/media/dvb/frontends/Kconfig2
-rw-r--r--drivers/media/dvb/frontends/af9013.c4
-rw-r--r--drivers/media/dvb/frontends/ix2505v.c2
-rw-r--r--drivers/media/dvb/frontends/mb86a20s.c36
-rw-r--r--drivers/media/dvb/ttpci/av7110_ca.c2
-rw-r--r--drivers/media/radio/Kconfig14
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-aimslab.c1
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c478
-rw-r--r--drivers/media/radio/radio-maxiradio.c4
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c9
-rw-r--r--drivers/media/rc/ene_ir.c23
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/imon.c60
-rw-r--r--drivers/media/rc/ir-lirc-codec.c6
-rw-r--r--drivers/media/rc/ir-raw.c2
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c52
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c6
-rw-r--r--drivers/media/rc/mceusb.c10
-rw-r--r--drivers/media/rc/nuvoton-cir.c6
-rw-r--r--drivers/media/rc/rc-main.c28
-rw-r--r--drivers/media/rc/streamzap.c14
-rw-r--r--drivers/media/video/Kconfig11
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/adv7175.c11
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c39
-rw-r--r--drivers/media/video/bt8xx/bttv.h1
-rw-r--r--drivers/media/video/cafe_ccic.c15
-rw-r--r--drivers/media/video/cpia2/cpia2.h2
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c65
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c104
-rw-r--r--drivers/media/video/cx18/cx18-driver.c24
-rw-r--r--drivers/media/video/cx18/cx18-driver.h3
-rw-r--r--drivers/media/video/cx18/cx18-streams.h3
-rw-r--r--drivers/media/video/cx231xx/cx231xx-dvb.c5
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c22
-rw-r--r--drivers/media/video/davinci/vpif.c177
-rw-r--r--drivers/media/video/davinci/vpif.h18
-rw-r--r--drivers/media/video/davinci/vpif_capture.c451
-rw-r--r--drivers/media/video/davinci/vpif_capture.h2
-rw-r--r--drivers/media/video/davinci/vpif_display.c474
-rw-r--r--drivers/media/video/davinci/vpif_display.h2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c19
-rw-r--r--drivers/media/video/et61x251/et61x251.h24
-rw-r--r--drivers/media/video/gspca/benq.c2
-rw-r--r--drivers/media/video/gspca/conex.c4
-rw-r--r--drivers/media/video/gspca/cpia1.c2
-rw-r--r--drivers/media/video/gspca/etoms.c4
-rw-r--r--drivers/media/video/gspca/finepix.c2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c2
-rw-r--r--drivers/media/video/gspca/gspca.c210
-rw-r--r--drivers/media/video/gspca/gspca.h2
-rw-r--r--drivers/media/video/gspca/jeilinj.c2
-rw-r--r--drivers/media/video/gspca/jpeg.h4
-rw-r--r--drivers/media/video/gspca/konica.c2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c2
-rw-r--r--drivers/media/video/gspca/mars.c2
-rw-r--r--drivers/media/video/gspca/mr97310a.c2
-rw-r--r--drivers/media/video/gspca/ov519.c8
-rw-r--r--drivers/media/video/gspca/ov534.c29
-rw-r--r--drivers/media/video/gspca/ov534_9.c2
-rw-r--r--drivers/media/video/gspca/pac207.c2
-rw-r--r--drivers/media/video/gspca/pac7302.c4
-rw-r--r--drivers/media/video/gspca/pac7311.c4
-rw-r--r--drivers/media/video/gspca/sn9c2028.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c2
-rw-r--r--drivers/media/video/gspca/sonixb.c270
-rw-r--r--drivers/media/video/gspca/sonixj.c155
-rw-r--r--drivers/media/video/gspca/spca1528.c2
-rw-r--r--drivers/media/video/gspca/spca500.c2
-rw-r--r--drivers/media/video/gspca/spca501.c2
-rw-r--r--drivers/media/video/gspca/spca505.c2
-rw-r--r--drivers/media/video/gspca/spca508.c2
-rw-r--r--drivers/media/video/gspca/spca561.c2
-rw-r--r--drivers/media/video/gspca/sq905.c2
-rw-r--r--drivers/media/video/gspca/sq905c.c2
-rw-r--r--drivers/media/video/gspca/sq930x.c2
-rw-r--r--drivers/media/video/gspca/stk014.c2
-rw-r--r--drivers/media/video/gspca/stv0680.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c2
-rw-r--r--drivers/media/video/gspca/sunplus.c2
-rw-r--r--drivers/media/video/gspca/t613.c2
-rw-r--r--drivers/media/video/gspca/tv8532.c2
-rw-r--r--drivers/media/video/gspca/vc032x.c2
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c2
-rw-r--r--drivers/media/video/gspca/zc3xx.c33
-rw-r--r--drivers/media/video/hdpvr/Makefile4
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c32
-rw-r--r--drivers/media/video/hdpvr/hdpvr-i2c.c149
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c7
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h8
-rw-r--r--drivers/media/video/ir-kbd-i2c.c25
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c9
-rw-r--r--drivers/media/video/mt9v011.c54
-rw-r--r--drivers/media/video/mt9v011.h36
-rw-r--r--drivers/media/video/ov7670.c74
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c61
-rw-r--r--drivers/media/video/saa7115.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c51
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c80
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h74
-rw-r--r--drivers/media/video/sr030pc30.c10
-rw-r--r--drivers/media/video/tda9875.c411
-rw-r--r--drivers/media/video/tlg2300/pd-video.c13
-rw-r--r--drivers/media/video/v4l2-common.c19
-rw-r--r--drivers/media/video/v4l2-ctrls.c34
-rw-r--r--drivers/media/video/v4l2-dev.c9
-rw-r--r--drivers/media/video/v4l2-device.c16
-rw-r--r--drivers/media/video/v4l2-ioctl.c20
-rw-r--r--drivers/media/video/w9966.c1
-rw-r--r--drivers/media/video/zoran/zoran_card.c2
-rw-r--r--drivers/mfd/88pm860x-core.c36
-rw-r--r--drivers/mfd/Kconfig16
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/ab3550-core.c28
-rw-r--r--drivers/mfd/ab8500-core.c306
-rw-r--r--drivers/mfd/ab8500-debugfs.c1016
-rw-r--r--drivers/mfd/ab8500-spi.c143
-rw-r--r--drivers/mfd/asic3.c62
-rw-r--r--drivers/mfd/cs5535-mfd.c151
-rw-r--r--drivers/mfd/ezx-pcap.c25
-rw-r--r--drivers/mfd/htc-egpio.c27
-rw-r--r--drivers/mfd/htc-i2cpld.c40
-rw-r--r--drivers/mfd/jz4740-adc.c25
-rw-r--r--drivers/mfd/max8925-core.c30
-rw-r--r--drivers/mfd/max8998-irq.c37
-rw-r--r--drivers/mfd/max8998.c134
-rw-r--r--drivers/mfd/mc13xxx-core.c2
-rw-r--r--drivers/mfd/mfd-core.c4
-rw-r--r--drivers/mfd/sm501.c9
-rw-r--r--drivers/mfd/stmpe.c28
-rw-r--r--drivers/mfd/t7l66xb.c20
-rw-r--r--drivers/mfd/tc6393xb.c22
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps6586x.c36
-rw-r--r--drivers/mfd/twl-core.c2
-rw-r--r--drivers/mfd/twl4030-irq.c28
-rw-r--r--drivers/mfd/twl6030-irq.c2
-rw-r--r--drivers/mfd/vx855.c2
-rw-r--r--drivers/mfd/wm831x-core.c17
-rw-r--r--drivers/mfd/wm831x-i2c.c14
-rw-r--r--drivers/mfd/wm831x-irq.c53
-rw-r--r--drivers/mfd/wm831x-spi.c18
-rw-r--r--drivers/mfd/wm8350-irq.c32
-rw-r--r--drivers/mfd/wm8994-core.c46
-rw-r--r--drivers/mfd/wm8994-irq.c32
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/cs5535-mfgpt.c73
-rw-r--r--drivers/mmc/host/bfin_sdh.c2
-rw-r--r--drivers/mmc/host/jz4740_mmc.c5
-rw-r--r--drivers/mmc/host/mmci.c109
-rw-r--r--drivers/mmc/host/mmci.h5
-rw-r--r--drivers/mmc/host/msm_sdcc.c52
-rw-r--r--drivers/mmc/host/sdhci-of-core.c9
-rw-r--r--drivers/mmc/host/sdhci-s3c.c36
-rw-r--r--drivers/mmc/host/ushc.c1
-rw-r--r--drivers/mtd/Kconfig19
-rw-r--r--drivers/mtd/Makefile2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c55
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c116
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c1
-rw-r--r--drivers/mtd/chips/cfi_util.c2
-rw-r--r--drivers/mtd/chips/fwh_lock.h2
-rw-r--r--drivers/mtd/devices/m25p80.c39
-rw-r--r--drivers/mtd/devices/sst25l.c4
-rw-r--r--drivers/mtd/maps/amd76xrom.c7
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c5
-rw-r--r--drivers/mtd/maps/ck804xrom.c7
-rw-r--r--drivers/mtd/maps/esb2rom.c9
-rw-r--r--drivers/mtd/maps/ichxrom.c9
-rw-r--r--drivers/mtd/maps/physmap_of.c4
-rw-r--r--drivers/mtd/maps/scx200_docflash.c5
-rw-r--r--drivers/mtd/maps/tqm8xxl.c2
-rw-r--r--drivers/mtd/mtdchar.c12
-rw-r--r--drivers/mtd/mtdconcat.c1
-rw-r--r--drivers/mtd/mtdoops.c3
-rw-r--r--drivers/mtd/mtdpart.c30
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/ams-delta.c80
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/fsmc_nand.c89
-rw-r--r--drivers/mtd/nand/jz4740_nand.c57
-rw-r--r--drivers/mtd/nand/mxc_nand.c2
-rw-r--r--drivers/mtd/nand/nand_base.c25
-rw-r--r--drivers/mtd/nand/nand_bbt.c3
-rw-r--r--drivers/mtd/nand/nandsim.c39
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c2
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c5
-rw-r--r--drivers/mtd/onenand/omap2.c80
-rw-r--r--drivers/mtd/onenand/onenand_base.c81
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c10
-rw-r--r--drivers/mtd/onenand/samsung.c7
-rw-r--r--drivers/mtd/ubi/vtbl.c6
-rw-r--r--drivers/net/Kconfig5
-rw-r--r--drivers/net/arm/ks8695net.c290
-rw-r--r--drivers/net/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/benet/be_cmds.c5
-rw-r--r--drivers/net/benet/be_main.c4
-rw-r--r--drivers/net/bfin_mac.c9
-rw-r--r--drivers/net/bna/bnad_ethtool.c1
-rw-r--r--drivers/net/bnx2.c21
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c234
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c59
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h4
-rw-r--r--drivers/net/bonding/bond_3ad.c4
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/can/Kconfig4
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c138
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/pch_can.c5
-rw-r--r--drivers/net/can/softing/Kconfig30
-rw-r--r--drivers/net/can/softing/Makefile6
-rw-r--r--drivers/net/can/softing/softing.h167
-rw-r--r--drivers/net/can/softing/softing_cs.c360
-rw-r--r--drivers/net/can/softing/softing_fw.c691
-rw-r--r--drivers/net/can/softing/softing_main.c893
-rw-r--r--drivers/net/can/softing/softing_platform.h40
-rw-r--r--drivers/net/cassini.c6
-rw-r--r--drivers/net/cnic.c12
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/depca.c6
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/e1000/e1000_hw.c4
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c10
-rw-r--r--drivers/net/e1000e/82571.c4
-rw-r--r--drivers/net/e1000e/Makefile2
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/ethtool.c2
-rw-r--r--drivers/net/e1000e/hw.h4
-rw-r--r--drivers/net/e1000e/ich8lan.c2
-rw-r--r--drivers/net/e1000e/lib.c20
-rw-r--r--drivers/net/e1000e/netdev.c224
-rw-r--r--drivers/net/e1000e/param.c6
-rw-r--r--drivers/net/e1000e/phy.c4
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/gianfar.c12
-rw-r--r--drivers/net/gianfar.h10
-rw-r--r--drivers/net/greth.c221
-rw-r--r--drivers/net/greth.h2
-rw-r--r--drivers/net/irda/sh_irda.c14
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c39
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c6
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/mlx4/catas.c6
-rw-r--r--drivers/net/mlx4/en_main.c3
-rw-r--r--drivers/net/mlx4/main.c17
-rw-r--r--drivers/net/mlx4/mcg.c23
-rw-r--r--drivers/net/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/niu.c61
-rw-r--r--drivers/net/ns83820.c5
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c14
-rw-r--r--drivers/net/pcmcia/axnet_cs.c6
-rw-r--r--drivers/net/r8169.c84
-rw-r--r--drivers/net/sfc/efx.c18
-rw-r--r--drivers/net/sfc/falcon.c25
-rw-r--r--drivers/net/sfc/net_driver.h10
-rw-r--r--drivers/net/sis900.c1
-rw-r--r--drivers/net/tg3.c95
-rw-r--r--drivers/net/tg3.h3
-rw-r--r--drivers/net/tile/tilepro.c10
-rw-r--r--drivers/net/ucc_geth.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c246
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/virtio_net.c27
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c93
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c274
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h7
-rw-r--r--drivers/net/vxge/vxge-config.c2
-rw-r--r--drivers/net/vxge/vxge-main.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c15
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c40
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c11
-rw-r--r--drivers/net/wireless/wl1251/main.c3
-rw-r--r--drivers/net/wireless/wl12xx/spi.c3
-rw-r--r--drivers/net/xen-netfront.c96
-rw-r--r--drivers/nfc/pn544.c2
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/parport/share.c4
-rw-r--r--drivers/pci/msi.c5
-rw-r--r--drivers/pci/msi.h6
-rw-r--r--drivers/pci/pci-acpi.c3
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci-stub.c7
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c25
-rw-r--r--drivers/pci/pci.h14
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c1
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h3
-rw-r--r--drivers/pci/pcie/aspm.c21
-rw-r--r--drivers/pci/pcie/pme.c31
-rw-r--r--drivers/pci/pcie/portdrv.h5
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c23
-rw-r--r--drivers/pci/pcie/portdrv_core.c25
-rw-r--r--drivers/pci/pcie/portdrv_pci.c37
-rw-r--r--drivers/pcmcia/Kconfig12
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c4
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c8
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/pnp/Makefile6
-rw-r--r--drivers/pnp/core.c7
-rw-r--r--drivers/pnp/driver.c7
-rw-r--r--drivers/pnp/isapnp/Makefile6
-rw-r--r--drivers/pnp/pnpacpi/Makefile3
-rw-r--r--drivers/pnp/pnpacpi/core.c93
-rw-r--r--drivers/pnp/pnpbios/Makefile5
-rw-r--r--drivers/power/Kconfig20
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/collie_battery.c13
-rw-r--r--drivers/power/ds2760_battery.c2
-rw-r--r--drivers/power/gpio-charger.c188
-rw-r--r--drivers/power/intel_mid_battery.c2
-rw-r--r--drivers/power/isp1704_charger.c201
-rw-r--r--drivers/power/jz4740-battery.c13
-rw-r--r--drivers/power/max17042_battery.c239
-rw-r--r--drivers/power/olpc_battery.c114
-rw-r--r--drivers/power/power_supply_core.c6
-rw-r--r--drivers/power/s3c_adc_battery.c12
-rw-r--r--drivers/power/tosa_battery.c13
-rw-r--r--drivers/power/wm97xx_battery.c4
-rw-r--r--drivers/power/z2_battery.c6
-rw-r--r--drivers/pps/clients/pps-ktimer.c2
-rw-r--r--drivers/pps/clients/pps_parport.c2
-rw-r--r--drivers/pps/generators/pps_gen_parport.c2
-rw-r--r--drivers/rapidio/rio-scan.c2
-rw-r--r--drivers/regulator/max8998.c94
-rw-r--r--drivers/rtc/Kconfig12
-rw-r--r--drivers/rtc/class.c1
-rw-r--r--drivers/rtc/interface.c64
-rw-r--r--drivers/rtc/rtc-max8998.c54
-rw-r--r--drivers/rtc/rtc-proc.c6
-rw-r--r--drivers/s390/block/dasd_alias.c6
-rw-r--r--drivers/s390/cio/device.c1
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c149
-rw-r--r--drivers/s390/net/qeth_l2_main.c22
-rw-r--r--drivers/s390/net/qeth_l3_main.c22
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h11
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c114
-rw-r--r--drivers/scsi/ipr.c8
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c19
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c64
-rw-r--r--drivers/scsi/pmcraid.c7
-rw-r--r--drivers/scsi/sd.c103
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sr.c4
-rw-r--r--drivers/sfi/sfi_core.c2
-rw-r--r--drivers/sh/intc/chip.c6
-rw-r--r--drivers/spi/Kconfig16
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/amba-pl022.c2
-rw-r--r--drivers/spi/ath79_spi.c292
-rw-r--r--drivers/spi/dw_spi_mmio.c5
-rw-r--r--drivers/spi/spi_imx.c6
-rw-r--r--drivers/spi/spi_sh_msiof.c8
-rw-r--r--drivers/spi/spi_tegra.c2
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/ssb/pcmcia.c2
-rw-r--r--drivers/ssb/scan.c10
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c4
-rw-r--r--drivers/staging/autofs/dirhash.c5
-rw-r--r--drivers/staging/bcm/Qos.c7
-rw-r--r--drivers/staging/bcm/Transmit.c6
-rw-r--r--drivers/staging/brcm80211/sys/wl_mac80211.c57
-rw-r--r--drivers/staging/brcm80211/sys/wlc_mac80211.c14
-rw-r--r--drivers/staging/brcm80211/sys/wlc_pub.h2
-rw-r--r--drivers/staging/comedi/Kconfig5
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c4
-rw-r--r--drivers/staging/hv/blkvsc_drv.c1
-rw-r--r--drivers/staging/hv/netvsc.c2
-rw-r--r--drivers/staging/hv/netvsc_drv.c2
-rw-r--r--drivers/staging/iio/adc/ad7476_core.c2
-rw-r--r--drivers/staging/iio/adc/ad7887_core.c2
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c2
-rw-r--r--drivers/staging/iio/dac/ad5446.c2
-rw-r--r--drivers/staging/intel_sst/intelmid_v2_control.c5
-rw-r--r--drivers/staging/lirc/TODO.lirc_zilog36
-rw-r--r--drivers/staging/lirc/lirc_imon.c1
-rw-r--r--drivers/staging/lirc/lirc_it87.c1
-rw-r--r--drivers/staging/lirc/lirc_parallel.c19
-rw-r--r--drivers/staging/lirc/lirc_sasem.c1
-rw-r--r--drivers/staging/lirc/lirc_serial.c3
-rw-r--r--drivers/staging/lirc/lirc_sir.c1
-rw-r--r--drivers/staging/lirc/lirc_zilog.c678
-rw-r--r--drivers/staging/msm/msm_fb.c8
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c10
-rw-r--r--drivers/staging/rt2860/rt_main_dev.c2
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c1
-rw-r--r--drivers/staging/rtl8712/hal_init.c11
-rw-r--r--drivers/staging/rtl8712/usb_intf.c145
-rw-r--r--drivers/staging/sm7xx/smtcfb.c10
-rw-r--r--drivers/staging/smbfs/dir.c4
-rw-r--r--drivers/staging/speakup/kobjects.c2
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c19
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c8
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c15
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/io_sm.h21
-rw-r--r--drivers/staging/tm6000/tm6000-video.c46
-rw-r--r--drivers/staging/usbip/stub.h1
-rw-r--r--drivers/staging/usbip/stub_dev.c18
-rw-r--r--drivers/staging/usbip/stub_rx.c4
-rw-r--r--drivers/staging/usbip/vhci.h6
-rw-r--r--drivers/staging/usbip/vhci_hcd.c54
-rw-r--r--drivers/staging/usbip/vhci_rx.c50
-rw-r--r--drivers/staging/vme/bridges/Module.symvers0
-rw-r--r--drivers/staging/xgifb/vb_setmode.c6
-rw-r--r--drivers/staging/zram/zram_drv.c4
-rw-r--r--drivers/target/Kconfig32
-rw-r--r--drivers/target/Makefile24
-rw-r--r--drivers/target/target_core_alua.c1991
-rw-r--r--drivers/target/target_core_alua.h126
-rw-r--r--drivers/target/target_core_cdb.c1131
-rw-r--r--drivers/target/target_core_configfs.c3225
-rw-r--r--drivers/target/target_core_device.c1694
-rw-r--r--drivers/target/target_core_fabric_configfs.c996
-rw-r--r--drivers/target/target_core_fabric_lib.c451
-rw-r--r--drivers/target/target_core_file.c688
-rw-r--r--drivers/target/target_core_file.h50
-rw-r--r--drivers/target/target_core_hba.c185
-rw-r--r--drivers/target/target_core_hba.h7
-rw-r--r--drivers/target/target_core_iblock.c808
-rw-r--r--drivers/target/target_core_iblock.h40
-rw-r--r--drivers/target/target_core_mib.c1078
-rw-r--r--drivers/target/target_core_mib.h28
-rw-r--r--drivers/target/target_core_pr.c4252
-rw-r--r--drivers/target/target_core_pr.h67
-rw-r--r--drivers/target/target_core_pscsi.c1470
-rw-r--r--drivers/target/target_core_pscsi.h65
-rw-r--r--drivers/target/target_core_rd.c1091
-rw-r--r--drivers/target/target_core_rd.h73
-rw-r--r--drivers/target/target_core_scdb.c105
-rw-r--r--drivers/target/target_core_scdb.h10
-rw-r--r--drivers/target/target_core_tmr.c404
-rw-r--r--drivers/target/target_core_tpg.c826
-rw-r--r--drivers/target/target_core_transport.c6134
-rw-r--r--drivers/target/target_core_ua.c332
-rw-r--r--drivers/target/target_core_ua.h36
-rw-r--r--drivers/thermal/Kconfig1
-rw-r--r--drivers/thermal/thermal_sys.c120
-rw-r--r--drivers/tty/Makefile2
-rw-r--r--drivers/tty/hvc/Makefile12
-rw-r--r--drivers/tty/hvc/hvc_beat.c (renamed from drivers/char/hvc_beat.c)0
-rw-r--r--drivers/tty/hvc/hvc_console.c (renamed from drivers/char/hvc_console.c)0
-rw-r--r--drivers/tty/hvc/hvc_console.h (renamed from drivers/char/hvc_console.h)0
-rw-r--r--drivers/tty/hvc/hvc_dcc.c (renamed from drivers/char/hvc_dcc.c)0
-rw-r--r--drivers/tty/hvc/hvc_irq.c (renamed from drivers/char/hvc_irq.c)0
-rw-r--r--drivers/tty/hvc/hvc_iseries.c (renamed from drivers/char/hvc_iseries.c)0
-rw-r--r--drivers/tty/hvc/hvc_iucv.c (renamed from drivers/char/hvc_iucv.c)0
-rw-r--r--drivers/tty/hvc/hvc_rtas.c (renamed from drivers/char/hvc_rtas.c)0
-rw-r--r--drivers/tty/hvc/hvc_tile.c (renamed from drivers/char/hvc_tile.c)0
-rw-r--r--drivers/tty/hvc/hvc_udbg.c (renamed from drivers/char/hvc_udbg.c)0
-rw-r--r--drivers/tty/hvc/hvc_vio.c (renamed from drivers/char/hvc_vio.c)0
-rw-r--r--drivers/tty/hvc/hvc_xen.c (renamed from drivers/char/hvc_xen.c)0
-rw-r--r--drivers/tty/hvc/hvcs.c (renamed from drivers/char/hvcs.c)0
-rw-r--r--drivers/tty/hvc/hvsi.c (renamed from drivers/char/hvsi.c)0
-rw-r--r--drivers/tty/n_gsm.c1
-rw-r--r--drivers/tty/n_hdlc.c90
-rw-r--r--drivers/tty/serial/21285.c (renamed from drivers/serial/21285.c)0
-rw-r--r--drivers/tty/serial/68328serial.c (renamed from drivers/serial/68328serial.c)0
-rw-r--r--drivers/tty/serial/68328serial.h (renamed from drivers/serial/68328serial.h)0
-rw-r--r--drivers/tty/serial/68360serial.c (renamed from drivers/serial/68360serial.c)1
-rw-r--r--drivers/tty/serial/8250.c (renamed from drivers/serial/8250.c)3
-rw-r--r--drivers/tty/serial/8250.h (renamed from drivers/serial/8250.h)0
-rw-r--r--drivers/tty/serial/8250_accent.c (renamed from drivers/serial/8250_accent.c)0
-rw-r--r--drivers/tty/serial/8250_acorn.c (renamed from drivers/serial/8250_acorn.c)0
-rw-r--r--drivers/tty/serial/8250_boca.c (renamed from drivers/serial/8250_boca.c)0
-rw-r--r--drivers/tty/serial/8250_early.c (renamed from drivers/serial/8250_early.c)0
-rw-r--r--drivers/tty/serial/8250_exar_st16c554.c (renamed from drivers/serial/8250_exar_st16c554.c)0
-rw-r--r--drivers/tty/serial/8250_fourport.c (renamed from drivers/serial/8250_fourport.c)0
-rw-r--r--drivers/tty/serial/8250_gsc.c (renamed from drivers/serial/8250_gsc.c)0
-rw-r--r--drivers/tty/serial/8250_hp300.c (renamed from drivers/serial/8250_hp300.c)0
-rw-r--r--drivers/tty/serial/8250_hub6.c (renamed from drivers/serial/8250_hub6.c)0
-rw-r--r--drivers/tty/serial/8250_mca.c (renamed from drivers/serial/8250_mca.c)0
-rw-r--r--drivers/tty/serial/8250_pci.c (renamed from drivers/serial/8250_pci.c)0
-rw-r--r--drivers/tty/serial/8250_pnp.c (renamed from drivers/serial/8250_pnp.c)0
-rw-r--r--drivers/tty/serial/Kconfig (renamed from drivers/serial/Kconfig)5
-rw-r--r--drivers/tty/serial/Makefile (renamed from drivers/serial/Makefile)0
-rw-r--r--drivers/tty/serial/altera_jtaguart.c (renamed from drivers/serial/altera_jtaguart.c)0
-rw-r--r--drivers/tty/serial/altera_uart.c (renamed from drivers/serial/altera_uart.c)0
-rw-r--r--drivers/tty/serial/amba-pl010.c (renamed from drivers/serial/amba-pl010.c)0
-rw-r--r--drivers/tty/serial/amba-pl011.c (renamed from drivers/serial/amba-pl011.c)0
-rw-r--r--drivers/tty/serial/apbuart.c (renamed from drivers/serial/apbuart.c)0
-rw-r--r--drivers/tty/serial/apbuart.h (renamed from drivers/serial/apbuart.h)0
-rw-r--r--drivers/tty/serial/atmel_serial.c (renamed from drivers/serial/atmel_serial.c)5
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c (renamed from drivers/serial/bcm63xx_uart.c)0
-rw-r--r--drivers/tty/serial/bfin_5xx.c (renamed from drivers/serial/bfin_5xx.c)15
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c (renamed from drivers/serial/bfin_sport_uart.c)0
-rw-r--r--drivers/tty/serial/bfin_sport_uart.h (renamed from drivers/serial/bfin_sport_uart.h)0
-rw-r--r--drivers/tty/serial/clps711x.c (renamed from drivers/serial/clps711x.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/Makefile (renamed from drivers/serial/cpm_uart/Makefile)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h (renamed from drivers/serial/cpm_uart/cpm_uart.h)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c (renamed from drivers/serial/cpm_uart/cpm_uart_core.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c (renamed from drivers/serial/cpm_uart/cpm_uart_cpm1.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h (renamed from drivers/serial/cpm_uart/cpm_uart_cpm1.h)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c (renamed from drivers/serial/cpm_uart/cpm_uart_cpm2.c)0
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h (renamed from drivers/serial/cpm_uart/cpm_uart_cpm2.h)0
-rw-r--r--drivers/tty/serial/crisv10.c (renamed from drivers/serial/crisv10.c)0
-rw-r--r--drivers/tty/serial/crisv10.h (renamed from drivers/serial/crisv10.h)0
-rw-r--r--drivers/tty/serial/dz.c (renamed from drivers/serial/dz.c)0
-rw-r--r--drivers/tty/serial/dz.h (renamed from drivers/serial/dz.h)0
-rw-r--r--drivers/tty/serial/icom.c (renamed from drivers/serial/icom.c)0
-rw-r--r--drivers/tty/serial/icom.h (renamed from drivers/serial/icom.h)0
-rw-r--r--drivers/tty/serial/ifx6x60.c (renamed from drivers/serial/ifx6x60.c)0
-rw-r--r--drivers/tty/serial/ifx6x60.h (renamed from drivers/serial/ifx6x60.h)0
-rw-r--r--drivers/tty/serial/imx.c (renamed from drivers/serial/imx.c)0
-rw-r--r--drivers/tty/serial/ioc3_serial.c (renamed from drivers/serial/ioc3_serial.c)0
-rw-r--r--drivers/tty/serial/ioc4_serial.c (renamed from drivers/serial/ioc4_serial.c)0
-rw-r--r--drivers/tty/serial/ip22zilog.c (renamed from drivers/serial/ip22zilog.c)0
-rw-r--r--drivers/tty/serial/ip22zilog.h (renamed from drivers/serial/ip22zilog.h)0
-rw-r--r--drivers/tty/serial/jsm/Makefile (renamed from drivers/serial/jsm/Makefile)0
-rw-r--r--drivers/tty/serial/jsm/jsm.h (renamed from drivers/serial/jsm/jsm.h)0
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c (renamed from drivers/serial/jsm/jsm_driver.c)0
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c (renamed from drivers/serial/jsm/jsm_neo.c)0
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c (renamed from drivers/serial/jsm/jsm_tty.c)0
-rw-r--r--drivers/tty/serial/kgdboc.c (renamed from drivers/serial/kgdboc.c)0
-rw-r--r--drivers/tty/serial/m32r_sio.c (renamed from drivers/serial/m32r_sio.c)0
-rw-r--r--drivers/tty/serial/m32r_sio.h (renamed from drivers/serial/m32r_sio.h)0
-rw-r--r--drivers/tty/serial/m32r_sio_reg.h (renamed from drivers/serial/m32r_sio_reg.h)0
-rw-r--r--drivers/tty/serial/max3100.c (renamed from drivers/serial/max3100.c)0
-rw-r--r--drivers/tty/serial/max3107-aava.c (renamed from drivers/serial/max3107-aava.c)0
-rw-r--r--drivers/tty/serial/max3107.c (renamed from drivers/serial/max3107.c)0
-rw-r--r--drivers/tty/serial/max3107.h (renamed from drivers/serial/max3107.h)0
-rw-r--r--drivers/tty/serial/mcf.c (renamed from drivers/serial/mcf.c)0
-rw-r--r--drivers/tty/serial/mfd.c (renamed from drivers/serial/mfd.c)0
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c (renamed from drivers/serial/mpc52xx_uart.c)0
-rw-r--r--drivers/tty/serial/mpsc.c (renamed from drivers/serial/mpsc.c)0
-rw-r--r--drivers/tty/serial/mrst_max3110.c (renamed from drivers/serial/mrst_max3110.c)0
-rw-r--r--drivers/tty/serial/mrst_max3110.h (renamed from drivers/serial/mrst_max3110.h)0
-rw-r--r--drivers/tty/serial/msm_serial.c (renamed from drivers/serial/msm_serial.c)0
-rw-r--r--drivers/tty/serial/msm_serial.h (renamed from drivers/serial/msm_serial.h)0
-rw-r--r--drivers/tty/serial/mux.c (renamed from drivers/serial/mux.c)0
-rw-r--r--drivers/tty/serial/netx-serial.c (renamed from drivers/serial/netx-serial.c)0
-rw-r--r--drivers/tty/serial/nwpserial.c (renamed from drivers/serial/nwpserial.c)0
-rw-r--r--drivers/tty/serial/of_serial.c (renamed from drivers/serial/of_serial.c)0
-rw-r--r--drivers/tty/serial/omap-serial.c (renamed from drivers/serial/omap-serial.c)0
-rw-r--r--drivers/tty/serial/pch_uart.c (renamed from drivers/serial/pch_uart.c)0
-rw-r--r--drivers/tty/serial/pmac_zilog.c (renamed from drivers/serial/pmac_zilog.c)0
-rw-r--r--drivers/tty/serial/pmac_zilog.h (renamed from drivers/serial/pmac_zilog.h)0
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c (renamed from drivers/serial/pnx8xxx_uart.c)0
-rw-r--r--drivers/tty/serial/pxa.c (renamed from drivers/serial/pxa.c)0
-rw-r--r--drivers/tty/serial/s3c2400.c (renamed from drivers/serial/s3c2400.c)0
-rw-r--r--drivers/tty/serial/s3c2410.c (renamed from drivers/serial/s3c2410.c)0
-rw-r--r--drivers/tty/serial/s3c2412.c (renamed from drivers/serial/s3c2412.c)0
-rw-r--r--drivers/tty/serial/s3c2440.c (renamed from drivers/serial/s3c2440.c)0
-rw-r--r--drivers/tty/serial/s3c24a0.c (renamed from drivers/serial/s3c24a0.c)0
-rw-r--r--drivers/tty/serial/s3c6400.c (renamed from drivers/serial/s3c6400.c)0
-rw-r--r--drivers/tty/serial/s5pv210.c (renamed from drivers/serial/s5pv210.c)0
-rw-r--r--drivers/tty/serial/sa1100.c (renamed from drivers/serial/sa1100.c)0
-rw-r--r--drivers/tty/serial/samsung.c (renamed from drivers/serial/samsung.c)4
-rw-r--r--drivers/tty/serial/samsung.h (renamed from drivers/serial/samsung.h)0
-rw-r--r--drivers/tty/serial/sb1250-duart.c (renamed from drivers/serial/sb1250-duart.c)2
-rw-r--r--drivers/tty/serial/sc26xx.c (renamed from drivers/serial/sc26xx.c)0
-rw-r--r--drivers/tty/serial/serial_core.c (renamed from drivers/serial/serial_core.c)0
-rw-r--r--drivers/tty/serial/serial_cs.c (renamed from drivers/serial/serial_cs.c)0
-rw-r--r--drivers/tty/serial/serial_ks8695.c (renamed from drivers/serial/serial_ks8695.c)0
-rw-r--r--drivers/tty/serial/serial_lh7a40x.c (renamed from drivers/serial/serial_lh7a40x.c)0
-rw-r--r--drivers/tty/serial/serial_txx9.c (renamed from drivers/serial/serial_txx9.c)0
-rw-r--r--drivers/tty/serial/sh-sci.c (renamed from drivers/serial/sh-sci.c)0
-rw-r--r--drivers/tty/serial/sh-sci.h (renamed from drivers/serial/sh-sci.h)0
-rw-r--r--drivers/tty/serial/sn_console.c (renamed from drivers/serial/sn_console.c)0
-rw-r--r--drivers/tty/serial/suncore.c (renamed from drivers/serial/suncore.c)0
-rw-r--r--drivers/tty/serial/suncore.h (renamed from drivers/serial/suncore.h)0
-rw-r--r--drivers/tty/serial/sunhv.c (renamed from drivers/serial/sunhv.c)0
-rw-r--r--drivers/tty/serial/sunsab.c (renamed from drivers/serial/sunsab.c)0
-rw-r--r--drivers/tty/serial/sunsab.h (renamed from drivers/serial/sunsab.h)0
-rw-r--r--drivers/tty/serial/sunsu.c (renamed from drivers/serial/sunsu.c)0
-rw-r--r--drivers/tty/serial/sunzilog.c (renamed from drivers/serial/sunzilog.c)0
-rw-r--r--drivers/tty/serial/sunzilog.h (renamed from drivers/serial/sunzilog.h)0
-rw-r--r--drivers/tty/serial/timbuart.c (renamed from drivers/serial/timbuart.c)0
-rw-r--r--drivers/tty/serial/timbuart.h (renamed from drivers/serial/timbuart.h)0
-rw-r--r--drivers/tty/serial/uartlite.c (renamed from drivers/serial/uartlite.c)0
-rw-r--r--drivers/tty/serial/ucc_uart.c (renamed from drivers/serial/ucc_uart.c)0
-rw-r--r--drivers/tty/serial/vr41xx_siu.c (renamed from drivers/serial/vr41xx_siu.c)0
-rw-r--r--drivers/tty/serial/vt8500_serial.c (renamed from drivers/serial/vt8500_serial.c)0
-rw-r--r--drivers/tty/serial/zs.c (renamed from drivers/serial/zs.c)0
-rw-r--r--drivers/tty/serial/zs.h (renamed from drivers/serial/zs.h)0
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/tty/vt/selection.c4
-rw-r--r--drivers/tty/vt/vc_screen.c16
-rw-r--r--drivers/tty/vt/vt.c135
-rw-r--r--drivers/tty/vt/vt_ioctl.c60
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/Kconfig6
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/hcd-pci.c7
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c26
-rw-r--r--drivers/usb/gadget/Kconfig9
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c268
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.h9
-rw-r--r--drivers/usb/gadget/composite.c5
-rw-r--r--drivers/usb/gadget/f_mass_storage.c3
-rw-r--r--drivers/usb/gadget/pch_udc.c127
-rw-r--r--drivers/usb/gadget/printer.c19
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c13
-rw-r--r--drivers/usb/host/ehci-fsl.h3
-rw-r--r--drivers/usb/host/ehci-hcd.c19
-rw-r--r--drivers/usb/host/ehci-hub.c7
-rw-r--r--drivers/usb/host/ehci-mxc.c25
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/usb/host/ehci-pci.c35
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c11
-rw-r--r--drivers/usb/host/sl811-hcd.c1
-rw-r--r--drivers/usb/host/xhci-ring.c91
-rw-r--r--drivers/usb/host/xhci.c60
-rw-r--r--drivers/usb/host/xhci.h16
-rw-r--r--drivers/usb/misc/usbled.c2
-rw-r--r--drivers/usb/misc/uss720.c1
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/musb_core.c10
-rw-r--r--drivers/usb/musb/musb_core.h12
-rw-r--r--drivers/usb/musb/musb_dma.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c71
-rw-r--r--drivers/usb/musb/musb_gadget.h8
-rw-r--r--drivers/usb/musb/musb_host.c11
-rw-r--r--drivers/usb/musb/musbhsdma.h19
-rw-r--r--drivers/usb/otg/Kconfig2
-rw-r--r--drivers/usb/otg/nop-usb-xceiv.c2
-rw-r--r--drivers/usb/otg/ulpi.c2
-rw-r--r--drivers/usb/serial/ch341.c10
-rw-r--r--drivers/usb/serial/cp210x.c16
-rw-r--r--drivers/usb/serial/digi_acceleport.c10
-rw-r--r--drivers/usb/serial/ftdi_sio.c39
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h32
-rw-r--r--drivers/usb/serial/generic.c20
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/io_tables.h1
-rw-r--r--drivers/usb/serial/iuu_phoenix.c1
-rw-r--r--drivers/usb/serial/keyspan.h4
-rw-r--r--drivers/usb/serial/keyspan_pda.c17
-rw-r--r--drivers/usb/serial/moto_modem.c1
-rw-r--r--drivers/usb/serial/option.c23
-rw-r--r--drivers/usb/serial/oti6858.c1
-rw-r--r--drivers/usb/serial/pl2303.c12
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/qcaux.c3
-rw-r--r--drivers/usb/serial/siemens_mpi.c1
-rw-r--r--drivers/usb/serial/spcp8x5.c7
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/usb-serial.c8
-rw-r--r--drivers/usb/serial/usb_debug.c1
-rw-r--r--drivers/usb/storage/unusual_cypress.h5
-rw-r--r--drivers/usb/storage/unusual_devs.h32
-rw-r--r--drivers/vhost/net.c9
-rw-r--r--drivers/vhost/vhost.c18
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/arkfb.c12
-rw-r--r--drivers/video/aty/aty128fb.c12
-rw-r--r--drivers/video/aty/atyfb_base.c10
-rw-r--r--drivers/video/aty/radeon_pm.c10
-rw-r--r--drivers/video/backlight/88pm860x_bl.c4
-rw-r--r--drivers/video/bf537-lq035.c58
-rw-r--r--drivers/video/chipsfb.c8
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/fbcon.c42
-rw-r--r--drivers/video/console/vgacon.c6
-rw-r--r--drivers/video/da8xx-fb.c11
-rw-r--r--drivers/video/ep93xx-fb.c6
-rw-r--r--drivers/video/fbmem.c12
-rw-r--r--drivers/video/fbsysfs.c20
-rw-r--r--drivers/video/geode/gxfb_core.c8
-rw-r--r--drivers/video/geode/lxfb_core.c8
-rw-r--r--drivers/video/i810/i810_main.c8
-rw-r--r--drivers/video/jz4740_fb.c8
-rw-r--r--drivers/video/mx3fb.c8
-rw-r--r--drivers/video/nuc900fb.c6
-rw-r--r--drivers/video/nvidia/nvidia.c8
-rw-r--r--drivers/video/ps3fb.c16
-rw-r--r--drivers/video/pxa168fb.c6
-rw-r--r--drivers/video/pxa3xx-gcu.c4
-rw-r--r--drivers/video/s3fb.c16
-rw-r--r--drivers/video/savage/savagefb_driver.c8
-rw-r--r--drivers/video/sh_mobile_hdmi.c8
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/sm501fb.c8
-rw-r--r--drivers/video/tmiofb.c10
-rw-r--r--drivers/video/via/viafbdev.c8
-rw-r--r--drivers/video/vt8623fb.c12
-rw-r--r--drivers/video/xen-fbfront.c4
-rw-r--r--drivers/virtio/virtio_pci.c20
-rw-r--r--drivers/w1/masters/omap_hdq.c28
-rw-r--r--drivers/xen/Kconfig11
-rw-r--r--drivers/xen/Makefile5
-rw-r--r--drivers/xen/gntdev.c665
-rw-r--r--drivers/xen/grant-table.c46
-rw-r--r--drivers/xen/platform-pci.c21
-rw-r--r--drivers/xen/xenfs/xenbus.c31
1168 files changed, 48874 insertions, 11421 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dd0a5b5e9bf3..9bfb71ff3a6a 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -26,6 +26,8 @@ source "drivers/ata/Kconfig"
26 26
27source "drivers/md/Kconfig" 27source "drivers/md/Kconfig"
28 28
29source "drivers/target/Kconfig"
30
29source "drivers/message/fusion/Kconfig" 31source "drivers/message/fusion/Kconfig"
30 32
31source "drivers/firewire/Kconfig" 33source "drivers/firewire/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index ef5132469f58..b423bb16c3a8 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_XEN) += xen/
24# regulators early, since some subsystems rely on them to initialize 24# regulators early, since some subsystems rely on them to initialize
25obj-$(CONFIG_REGULATOR) += regulator/ 25obj-$(CONFIG_REGULATOR) += regulator/
26 26
27# char/ comes before serial/ etc so that the VT console is the boot-time 27# tty/ comes before char/ so that the VT console is the boot-time
28# default. 28# default.
29obj-y += tty/ 29obj-y += tty/
30obj-y += char/ 30obj-y += char/
@@ -38,7 +38,6 @@ obj-$(CONFIG_CONNECTOR) += connector/
38obj-$(CONFIG_FB_I810) += video/i810/ 38obj-$(CONFIG_FB_I810) += video/i810/
39obj-$(CONFIG_FB_INTEL) += video/intelfb/ 39obj-$(CONFIG_FB_INTEL) += video/intelfb/
40 40
41obj-y += serial/
42obj-$(CONFIG_PARPORT) += parport/ 41obj-$(CONFIG_PARPORT) += parport/
43obj-y += base/ block/ misc/ mfd/ nfc/ 42obj-y += base/ block/ misc/ mfd/ nfc/
44obj-$(CONFIG_NUBUS) += nubus/ 43obj-$(CONFIG_NUBUS) += nubus/
@@ -46,6 +45,7 @@ obj-y += macintosh/
46obj-$(CONFIG_IDE) += ide/ 45obj-$(CONFIG_IDE) += ide/
47obj-$(CONFIG_SCSI) += scsi/ 46obj-$(CONFIG_SCSI) += scsi/
48obj-$(CONFIG_ATA) += ata/ 47obj-$(CONFIG_ATA) += ata/
48obj-$(CONFIG_TARGET_CORE) += target/
49obj-$(CONFIG_MTD) += mtd/ 49obj-$(CONFIG_MTD) += mtd/
50obj-$(CONFIG_SPI) += spi/ 50obj-$(CONFIG_SPI) += spi/
51obj-y += net/ 51obj-y += net/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 3f3489c5ca8c..2aa042a5da6d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -51,12 +51,7 @@ config ACPI_PROCFS
51 For backwards compatibility, this option allows 51 For backwards compatibility, this option allows
52 deprecated /proc/acpi/ files to exist, even when 52 deprecated /proc/acpi/ files to exist, even when
53 they have been replaced by functions in /sys. 53 they have been replaced by functions in /sys.
54 The deprecated files (and their replacements) include:
55 54
56 /proc/acpi/processor/*/throttling (/sys/class/thermal/
57 cooling_device*/*)
58 /proc/acpi/video/*/brightness (/sys/class/backlight/)
59 /proc/acpi/thermal_zone/*/* (/sys/class/thermal/)
60 This option has no effect on /proc/acpi/ files 55 This option has no effect on /proc/acpi/ files
61 and functions which do not yet exist in /sys. 56 and functions which do not yet exist in /sys.
62 57
@@ -74,6 +69,8 @@ config ACPI_PROCFS_POWER
74 /proc/acpi/ac_adapter/* (sys/class/power_supply/*) 69 /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
75 This option has no effect on /proc/acpi/ directories 70 This option has no effect on /proc/acpi/ directories
76 and functions, which do not yet exist in /sys 71 and functions, which do not yet exist in /sys
72 This option, together with the proc directories, will be
73 deleted in 2.6.39.
77 74
78 Say N to delete power /proc/acpi/ directories that have moved to /sys/ 75 Say N to delete power /proc/acpi/ directories that have moved to /sys/
79 76
@@ -209,6 +206,17 @@ config ACPI_PROCESSOR
209 206
210 To compile this driver as a module, choose M here: 207 To compile this driver as a module, choose M here:
211 the module will be called processor. 208 the module will be called processor.
209config ACPI_IPMI
210 tristate "IPMI"
211 depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER
212 default n
213 help
214 This driver enables the ACPI to access the BMC controller. And it
215 uses the IPMI request/response message to communicate with BMC
216 controller, which can be found on on the server.
217
218 To compile this driver as a module, choose M here:
219 the module will be called as acpi_ipmi.
212 220
213config ACPI_HOTPLUG_CPU 221config ACPI_HOTPLUG_CPU
214 bool 222 bool
@@ -310,7 +318,7 @@ config ACPI_PCI_SLOT
310 the module will be called pci_slot. 318 the module will be called pci_slot.
311 319
312config X86_PM_TIMER 320config X86_PM_TIMER
313 bool "Power Management Timer Support" if EMBEDDED 321 bool "Power Management Timer Support" if EXPERT
314 depends on X86 322 depends on X86
315 default y 323 default y
316 help 324 help
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3d031d02e54b..d113fa5100b2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@ acpi-y += atomicio.o
24# sleep related files 24# sleep related files
25acpi-y += wakeup.o 25acpi-y += wakeup.o
26acpi-y += sleep.o 26acpi-y += sleep.o
27acpi-$(CONFIG_ACPI_SLEEP) += proc.o 27acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o
28 28
29 29
30# 30#
@@ -69,5 +69,6 @@ processor-y += processor_idle.o processor_thermal.o
69processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 69processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
70 70
71obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o 71obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
72obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o
72 73
73obj-$(CONFIG_ACPI_APEI) += apei/ 74obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 25d3aaebc10d..58c3f74bd84c 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -197,7 +197,8 @@ static int acpi_ac_add_fs(struct acpi_device *device)
197{ 197{
198 struct proc_dir_entry *entry = NULL; 198 struct proc_dir_entry *entry = NULL;
199 199
200 200 printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
201 " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
201 if (!acpi_device_dir(device)) { 202 if (!acpi_device_dir(device)) {
202 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 203 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
203 acpi_ac_dir); 204 acpi_ac_dir);
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
new file mode 100644
index 000000000000..f40acef80269
--- /dev/null
+++ b/drivers/acpi/acpi_ipmi.c
@@ -0,0 +1,525 @@
1/*
2 * acpi_ipmi.c - ACPI IPMI opregion
3 *
4 * Copyright (C) 2010 Intel Corporation
5 * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or (at
12 * your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/types.h>
30#include <linux/delay.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/interrupt.h>
34#include <linux/list.h>
35#include <linux/spinlock.h>
36#include <linux/io.h>
37#include <acpi/acpi_bus.h>
38#include <acpi/acpi_drivers.h>
39#include <linux/ipmi.h>
40#include <linux/device.h>
41#include <linux/pnp.h>
42
MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");

/* Bit index in acpi_ipmi_device.flags: set once the IPMI address space
 * handler has been installed for this device's ACPI handle. */
#define IPMI_FLAGS_HANDLER_INSTALL 0

/* Status codes written into acpi_ipmi_buffer.status for the AML caller
 * (response buffer layout per ACPI 4.0, sec 5.5.2.4.3.2). */
#define ACPI_IPMI_OK 0
#define ACPI_IPMI_TIMEOUT 0x10
#define ACPI_IPMI_UNKNOWN 0x07
/* the IPMI timeout is 5s */
#define IPMI_TIMEOUT (5 * HZ)
54
/* Per-BMC state: one instance per ACPI-enumerated IPMI system interface. */
struct acpi_ipmi_device {
	/* the device list attached to driver_data.ipmi_devices */
	struct list_head head;
	/* the IPMI request message list */
	struct list_head tx_msg_list;
	/* protects tx_msg_list and curr_msgid */
	struct mutex tx_msg_lock;
	/* ACPI handle the IPMI opregion handler is installed on */
	acpi_handle handle;
	/* underlying PNP device; used for dev_warn() messages */
	struct pnp_dev *pnp_dev;
	/* IPMI messaging user created via ipmi_create_user() */
	ipmi_user_t user_interface;
	int ipmi_ifnum; /* IPMI interface number */
	/* last message id handed out; incremented under tx_msg_lock */
	long curr_msgid;
	/* bit flags, see IPMI_FLAGS_HANDLER_INSTALL */
	unsigned long flags;
	/* SMI info copied from ipmi_get_smi_info(); holds a dev reference */
	struct ipmi_smi_info smi_data;
};
69
/* Module-global driver state (single instance: driver_data below). */
struct ipmi_driver_data {
	/* all registered acpi_ipmi_device instances */
	struct list_head ipmi_devices;
	/* watcher notified when SMIs appear/disappear */
	struct ipmi_smi_watcher bmc_events;
	/* receive handler shared by every created IPMI user */
	struct ipmi_user_hndl ipmi_hndlrs;
	/* protects ipmi_devices */
	struct mutex ipmi_lock;
};
76
/* One in-flight IPMI request created by an opregion write. */
struct acpi_ipmi_msg {
	/* linkage on acpi_ipmi_device.tx_msg_list */
	struct list_head head;
	/*
	 * Generally speaking the addr type should be SI_ADDR_TYPE and
	 * the addr channel should be BMC.
	 * In fact it can also be IPMB type, but we would have to
	 * parse it from the Netfn command buffer. That is complex
	 * enough that it is skipped here.
	 */
	struct ipmi_addr addr;
	/* per-device unique id used to match responses in ipmi_msg_handler */
	long tx_msgid;
	/* it is used to track whether the IPMI message is finished */
	struct completion tx_complete;
	struct kernel_ipmi_msg tx_message;
	/* nonzero once a response has been copied into rx_data */
	int msg_done;
	/* tx data, copied from the ACPI object buffer */
	u8 tx_data[64];
	int tx_len;
	/* response payload copied from the received IPMI message */
	u8 rx_data[64];
	int rx_len;
	/* owning device, set at allocation time */
	struct acpi_ipmi_device *device;
};
99
/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
	u8 status;	/* ACPI_IPMI_OK / _TIMEOUT / _UNKNOWN on response */
	u8 length;	/* number of valid bytes in data[] */
	u8 data[64];	/* request payload in, response payload out */
};
106
/* Forward declarations for the callbacks wired into driver_data below. */
static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);

/* Single module-wide instance; ipmi_lock is initialized in acpi_ipmi_init. */
static struct ipmi_driver_data driver_data = {
	.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
	.bmc_events = {
		.owner = THIS_MODULE,
		.new_smi = ipmi_register_bmc,
		.smi_gone = ipmi_bmc_gone,
	},
	.ipmi_hndlrs = {
		.ipmi_recv_hndl = ipmi_msg_handler,
	},
};
124
125static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
126{
127 struct acpi_ipmi_msg *ipmi_msg;
128 struct pnp_dev *pnp_dev = ipmi->pnp_dev;
129
130 ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
131 if (!ipmi_msg) {
132 dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
133 return NULL;
134 }
135 init_completion(&ipmi_msg->tx_complete);
136 INIT_LIST_HEAD(&ipmi_msg->head);
137 ipmi_msg->device = ipmi;
138 return ipmi_msg;
139}
140
141#define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
142#define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
143static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
144 acpi_physical_address address,
145 acpi_integer *value)
146{
147 struct kernel_ipmi_msg *msg;
148 struct acpi_ipmi_buffer *buffer;
149 struct acpi_ipmi_device *device;
150
151 msg = &tx_msg->tx_message;
152 /*
153 * IPMI network function and command are encoded in the address
154 * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
155 */
156 msg->netfn = IPMI_OP_RGN_NETFN(address);
157 msg->cmd = IPMI_OP_RGN_CMD(address);
158 msg->data = tx_msg->tx_data;
159 /*
160 * value is the parameter passed by the IPMI opregion space handler.
161 * It points to the IPMI request message buffer
162 */
163 buffer = (struct acpi_ipmi_buffer *)value;
164 /* copy the tx message data */
165 msg->data_len = buffer->length;
166 memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
167 /*
168 * now the default type is SYSTEM_INTERFACE and channel type is BMC.
169 * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
170 * the addr type should be changed to IPMB. Then we will have to parse
171 * the IPMI request message buffer to get the IPMB address.
172 * If so, please fix me.
173 */
174 tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
175 tx_msg->addr.channel = IPMI_BMC_CHANNEL;
176 tx_msg->addr.data[0] = 0;
177
178 /* Get the msgid */
179 device = tx_msg->device;
180 mutex_lock(&device->tx_msg_lock);
181 device->curr_msgid++;
182 tx_msg->tx_msgid = device->curr_msgid;
183 mutex_unlock(&device->tx_msg_lock);
184}
185
186static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
187 acpi_integer *value, int rem_time)
188{
189 struct acpi_ipmi_buffer *buffer;
190
191 /*
192 * value is also used as output parameter. It represents the response
193 * IPMI message returned by IPMI command.
194 */
195 buffer = (struct acpi_ipmi_buffer *)value;
196 if (!rem_time && !msg->msg_done) {
197 buffer->status = ACPI_IPMI_TIMEOUT;
198 return;
199 }
200 /*
201 * If the flag of msg_done is not set or the recv length is zero, it
202 * means that the IPMI command is not executed correctly.
203 * The status code will be ACPI_IPMI_UNKNOWN.
204 */
205 if (!msg->msg_done || !msg->rx_len) {
206 buffer->status = ACPI_IPMI_UNKNOWN;
207 return;
208 }
209 /*
210 * If the IPMI response message is obtained correctly, the status code
211 * will be ACPI_IPMI_OK
212 */
213 buffer->status = ACPI_IPMI_OK;
214 buffer->length = msg->rx_len;
215 memcpy(buffer->data, msg->rx_data, msg->rx_len);
216}
217
218static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
219{
220 struct acpi_ipmi_msg *tx_msg, *temp;
221 int count = HZ / 10;
222 struct pnp_dev *pnp_dev = ipmi->pnp_dev;
223
224 list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
225 /* wake up the sleep thread on the Tx msg */
226 complete(&tx_msg->tx_complete);
227 }
228
229 /* wait for about 100ms to flush the tx message list */
230 while (count--) {
231 if (list_empty(&ipmi->tx_msg_list))
232 break;
233 schedule_timeout(1);
234 }
235 if (!list_empty(&ipmi->tx_msg_list))
236 dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
237}
238
239static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
240{
241 struct acpi_ipmi_device *ipmi_device = user_msg_data;
242 int msg_found = 0;
243 struct acpi_ipmi_msg *tx_msg;
244 struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
245
246 if (msg->user != ipmi_device->user_interface) {
247 dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
248 "returned user %p, expected user %p\n",
249 msg->user, ipmi_device->user_interface);
250 ipmi_free_recv_msg(msg);
251 return;
252 }
253 mutex_lock(&ipmi_device->tx_msg_lock);
254 list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
255 if (msg->msgid == tx_msg->tx_msgid) {
256 msg_found = 1;
257 break;
258 }
259 }
260
261 mutex_unlock(&ipmi_device->tx_msg_lock);
262 if (!msg_found) {
263 dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
264 "returned.\n", msg->msgid);
265 ipmi_free_recv_msg(msg);
266 return;
267 }
268
269 if (msg->msg.data_len) {
270 /* copy the response data to Rx_data buffer */
271 memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
272 tx_msg->rx_len = msg->msg.data_len;
273 tx_msg->msg_done = 1;
274 }
275 complete(&tx_msg->tx_complete);
276 ipmi_free_recv_msg(msg);
277};
278
/*
 * SMI watcher "new_smi" callback: called for every IPMI system interface.
 * Only ACPI-enumerated interfaces (SI_ACPI) are taken; for a new one this
 * creates an acpi_ipmi_device, an IPMI user, installs the opregion handler
 * (via acpi_add_ipmi_device) and keeps the smi_data.dev reference.
 * On every bail-out path the smi_data.dev reference is dropped.
 */
static void ipmi_register_bmc(int iface, struct device *dev)
{
	struct acpi_ipmi_device *ipmi_device, *temp;
	struct pnp_dev *pnp_dev;
	ipmi_user_t user;
	int err;
	struct ipmi_smi_info smi_data;
	acpi_handle handle;

	err = ipmi_get_smi_info(iface, &smi_data);

	if (err)
		return;

	/* only interfaces discovered through ACPI are interesting here */
	if (smi_data.addr_src != SI_ACPI) {
		put_device(smi_data.dev);
		return;
	}

	handle = smi_data.addr_info.acpi_info.acpi_handle;

	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
		/*
		 * if the corresponding ACPI handle is already added
		 * to the device list, don't add it again.
		 */
		if (temp->handle == handle)
			goto out;
	}

	ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);

	if (!ipmi_device)
		goto out;

	pnp_dev = to_pnp_dev(smi_data.dev);
	ipmi_device->handle = handle;
	ipmi_device->pnp_dev = pnp_dev;

	err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
					ipmi_device, &user);
	if (err) {
		dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
		kfree(ipmi_device);
		goto out;
	}
	/* publishes the device and installs the opregion handler */
	acpi_add_ipmi_device(ipmi_device);
	ipmi_device->user_interface = user;
	ipmi_device->ipmi_ifnum = iface;
	mutex_unlock(&driver_data.ipmi_lock);
	/* success: the smi_data.dev reference is kept inside smi_data */
	memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
	return;

out:
	mutex_unlock(&driver_data.ipmi_lock);
	put_device(smi_data.dev);
	return;
}
338
339static void ipmi_bmc_gone(int iface)
340{
341 struct acpi_ipmi_device *ipmi_device, *temp;
342
343 mutex_lock(&driver_data.ipmi_lock);
344 list_for_each_entry_safe(ipmi_device, temp,
345 &driver_data.ipmi_devices, head) {
346 if (ipmi_device->ipmi_ifnum != iface)
347 continue;
348
349 acpi_remove_ipmi_device(ipmi_device);
350 put_device(ipmi_device->smi_data.dev);
351 kfree(ipmi_device);
352 break;
353 }
354 mutex_unlock(&driver_data.ipmi_lock);
355}
/* --------------------------------------------------------------------------
 *			Address Space Management
 * -------------------------------------------------------------------------- */
/*
 * This is the IPMI opregion space handler.
 * @function: indicates the read/write. In fact as the IPMI message is driven
 *            by command, only write is meaningful.
 * @address: This contains the netfn/command of IPMI request message.
 * @bits   : not used.
 * @value  : it is an in/out parameter. It points to the IPMI message buffer.
 *           Before the IPMI message is sent, it represents the actual request
 *           IPMI message. After the IPMI message is finished, it represents
 *           the response IPMI message returned by IPMI command.
 * @handler_context: IPMI device context.
 */

static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, acpi_integer *value,
		      void *handler_context, void *region_context)
{
	struct acpi_ipmi_msg *tx_msg;
	struct acpi_ipmi_device *ipmi_device = handler_context;
	int err, rem_time;
	acpi_status status;
	/*
	 * IPMI opregion message.
	 * An IPMI message is first written to the BMC, and system software
	 * then gets the response.  So a read access of the IPMI opregion
	 * is meaningless.
	 */
	if ((function & ACPI_IO_MASK) == ACPI_READ)
		return AE_TYPE;

	if (!ipmi_device->user_interface)
		return AE_NOT_EXIST;

	tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
	if (!tx_msg)
		return AE_NO_MEMORY;

	/* build the request, then queue it before sending so that the
	 * receive callback can always find it on tx_msg_list */
	acpi_format_ipmi_msg(tx_msg, address, value);
	mutex_lock(&ipmi_device->tx_msg_lock);
	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
	mutex_unlock(&ipmi_device->tx_msg_lock);
	err = ipmi_request_settime(ipmi_device->user_interface,
					&tx_msg->addr,
					tx_msg->tx_msgid,
					&tx_msg->tx_message,
					NULL, 0, 0, 0);
	if (err) {
		status = AE_ERROR;
		goto end_label;
	}
	/* wait up to IPMI_TIMEOUT for ipmi_msg_handler to complete us */
	rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
					IPMI_TIMEOUT);
	acpi_format_ipmi_response(tx_msg, value, rem_time);
	status = AE_OK;

end_label:
	/* unlink before freeing so the receive callback can't touch tx_msg */
	mutex_lock(&ipmi_device->tx_msg_lock);
	list_del(&tx_msg->head);
	mutex_unlock(&ipmi_device->tx_msg_lock);
	kfree(tx_msg);
	return status;
}
422
423static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
424{
425 if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
426 return;
427
428 acpi_remove_address_space_handler(ipmi->handle,
429 ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
430
431 clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
432}
433
434static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
435{
436 acpi_status status;
437
438 if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
439 return 0;
440
441 status = acpi_install_address_space_handler(ipmi->handle,
442 ACPI_ADR_SPACE_IPMI,
443 &acpi_ipmi_space_handler,
444 NULL, ipmi);
445 if (ACPI_FAILURE(status)) {
446 struct pnp_dev *pnp_dev = ipmi->pnp_dev;
447 dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
448 "handle\n");
449 return -EINVAL;
450 }
451 set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
452 return 0;
453}
454
455static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
456{
457
458 INIT_LIST_HEAD(&ipmi_device->head);
459
460 mutex_init(&ipmi_device->tx_msg_lock);
461 INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
462 ipmi_install_space_handler(ipmi_device);
463
464 list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
465}
466
467static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
468{
469 /*
470 * If the IPMI user interface is created, it should be
471 * destroyed.
472 */
473 if (ipmi_device->user_interface) {
474 ipmi_destroy_user(ipmi_device->user_interface);
475 ipmi_device->user_interface = NULL;
476 }
477 /* flush the Tx_msg list */
478 if (!list_empty(&ipmi_device->tx_msg_list))
479 ipmi_flush_tx_msg(ipmi_device);
480
481 list_del(&ipmi_device->head);
482 ipmi_remove_space_handler(ipmi_device);
483}
484
485static int __init acpi_ipmi_init(void)
486{
487 int result = 0;
488
489 if (acpi_disabled)
490 return result;
491
492 mutex_init(&driver_data.ipmi_lock);
493
494 result = ipmi_smi_watcher_register(&driver_data.bmc_events);
495
496 return result;
497}
498
/*
 * Module exit: unregister the SMI watcher, then tear down every device
 * still on the list, mirroring what ipmi_bmc_gone() would have done.
 */
static void __exit acpi_ipmi_exit(void)
{
	struct acpi_ipmi_device *ipmi_device, *temp;

	if (acpi_disabled)
		return;

	ipmi_smi_watcher_unregister(&driver_data.bmc_events);

	/*
	 * When one smi_watcher is unregistered, it is only deleted
	 * from the smi_watcher list. But the smi_gone callback function
	 * is not called. So explicitly uninstall the ACPI IPMI opregion
	 * handler and free it.
	 */
	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry_safe(ipmi_device, temp,
				&driver_data.ipmi_devices, head) {
		acpi_remove_ipmi_device(ipmi_device);
		put_device(ipmi_device->smi_data.dev);
		kfree(ipmi_device);
	}
	mutex_unlock(&driver_data.ipmi_lock);
}
523
524module_init(acpi_ipmi_init);
525module_exit(acpi_ipmi_exit);
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a7e1d1aa4107..eec2eadd2431 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
14 14
15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ 15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
16 evmisc.o evrgnini.o evxface.o evxfregn.o \ 16 evmisc.o evrgnini.o evxface.o evxfregn.o \
17 evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o 17 evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o
18 18
19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ 19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ 20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 3e50c74ed4a1..e0ba17f0a7c8 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index b17d8de9f6ff..ab87396c2c07 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 72e9d5eb083c..eb0b1f8dee6d 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 894a0ff2a946..666271b65418 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index a6f99cc37a19..41d247daf461 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,8 +51,6 @@ acpi_status acpi_ev_initialize_events(void);
51 51
52acpi_status acpi_ev_install_xrupt_handlers(void); 52acpi_status acpi_ev_install_xrupt_handlers(void);
53 53
54acpi_status acpi_ev_install_fadt_gpes(void);
55
56u32 acpi_ev_fixed_event_detect(void); 54u32 acpi_ev_fixed_event_detect(void);
57 55
58/* 56/*
@@ -82,9 +80,9 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);
82 80
83acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); 81acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
84 82
85acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); 83acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
86 84
87acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); 85acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
88 86
89struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, 87struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
90 u32 gpe_number); 88 u32 gpe_number);
@@ -93,6 +91,8 @@ struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
93 struct acpi_gpe_block_info 91 struct acpi_gpe_block_info
94 *gpe_block); 92 *gpe_block);
95 93
94acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info);
95
96/* 96/*
97 * evgpeblk - Upper-level GPE block support 97 * evgpeblk - Upper-level GPE block support
98 */ 98 */
@@ -107,12 +107,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
107acpi_status 107acpi_status
108acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 108acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
109 struct acpi_gpe_block_info *gpe_block, 109 struct acpi_gpe_block_info *gpe_block,
110 void *ignored); 110 void *context);
111 111
112acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); 112acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
113 113
114u32 114u32
115acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, 115acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
116 struct acpi_gpe_event_info *gpe_event_info,
116 u32 gpe_number); 117 u32 gpe_number);
117 118
118/* 119/*
@@ -126,10 +127,6 @@ acpi_status
126acpi_ev_match_gpe_method(acpi_handle obj_handle, 127acpi_ev_match_gpe_method(acpi_handle obj_handle,
127 u32 level, void *context, void **return_value); 128 u32 level, void *context, void **return_value);
128 129
129acpi_status
130acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
131 u32 level, void *context, void **return_value);
132
133/* 130/*
134 * evgpeutil - GPE utilities 131 * evgpeutil - GPE utilities
135 */ 132 */
@@ -138,6 +135,10 @@ acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
138 135
139u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); 136u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
140 137
138acpi_status
139acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
140 struct acpi_gpe_block_info *gpe_block, void *context);
141
141struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number); 142struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
142 143
143acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt); 144acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ad88fcae4eb9..82a1bd283db8 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -146,6 +146,9 @@ u8 acpi_gbl_system_awake_and_running;
146 146
147extern u32 acpi_gbl_nesting_level; 147extern u32 acpi_gbl_nesting_level;
148 148
149ACPI_EXTERN u32 acpi_gpe_count;
150ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
151
149/* Support for dynamic control method tracing mechanism */ 152/* Support for dynamic control method tracing mechanism */
150 153
151ACPI_EXTERN u32 acpi_gbl_original_dbg_level; 154ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
@@ -225,8 +228,10 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_present;
225 */ 228 */
226ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ 229ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */
227ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ 230ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
231ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
228#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock 232#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
229#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock 233#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
234#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
230 235
231/***************************************************************************** 236/*****************************************************************************
232 * 237 *
@@ -370,7 +375,9 @@ ACPI_EXTERN struct acpi_fixed_event_handler
370ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; 375ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
371ACPI_EXTERN struct acpi_gpe_block_info 376ACPI_EXTERN struct acpi_gpe_block_info
372*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; 377*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
373ACPI_EXTERN u8 acpi_all_gpes_initialized; 378ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
379ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
380ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
374 381
375/***************************************************************************** 382/*****************************************************************************
376 * 383 *
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 167470ad2d21..e7213beaafc7 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -94,7 +94,7 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
94 struct acpi_gpe_register_info *gpe_register_info); 94 struct acpi_gpe_register_info *gpe_register_info);
95 95
96acpi_status 96acpi_status
97acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action); 97acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action);
98 98
99acpi_status 99acpi_status
100acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 100acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 049e203bd621..3731e1c34b83 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 2ceb0c05b2d7..54784bb42cec 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -408,17 +408,18 @@ struct acpi_predefined_data {
408 408
409/* Dispatch info for each GPE -- either a method or handler, cannot be both */ 409/* Dispatch info for each GPE -- either a method or handler, cannot be both */
410 410
411struct acpi_handler_info { 411struct acpi_gpe_handler_info {
412 acpi_event_handler address; /* Address of handler, if any */ 412 acpi_gpe_handler address; /* Address of handler, if any */
413 void *context; /* Context to be passed to handler */ 413 void *context; /* Context to be passed to handler */
414 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ 414 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
415 u8 orig_flags; /* Original misc info about this GPE */ 415 u8 original_flags; /* Original (pre-handler) GPE info */
416 u8 orig_enabled; /* Set if the GPE was originally enabled */ 416 u8 originally_enabled; /* True if GPE was originally enabled */
417}; 417};
418 418
419union acpi_gpe_dispatch_info { 419union acpi_gpe_dispatch_info {
420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */
421 struct acpi_handler_info *handler; 421 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
422 struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */
422}; 423};
423 424
424/* 425/*
@@ -458,7 +459,7 @@ struct acpi_gpe_block_info {
458 u32 register_count; /* Number of register pairs in block */ 459 u32 register_count; /* Number of register pairs in block */
459 u16 gpe_count; /* Number of individual GPEs in block */ 460 u16 gpe_count; /* Number of individual GPEs in block */
460 u8 block_base_number; /* Base GPE number for this block */ 461 u8 block_base_number; /* Base GPE number for this block */
461 u8 initialized; /* If set, the GPE block has been initialized */ 462 u8 initialized; /* TRUE if this block is initialized */
462}; 463};
463 464
464/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */ 465/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 8d5c9e0a495f..b7491ee1fba6 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index d44d3bc5b847..79a598c67fe3 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 962a3ccff6fd..1055769f2f01 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -97,8 +97,6 @@
97#define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized, _REG was run */ 97#define AOPOBJ_OBJECT_INITIALIZED 0x08 /* Region is initialized, _REG was run */
98#define AOPOBJ_SETUP_COMPLETE 0x10 /* Region setup is complete */ 98#define AOPOBJ_SETUP_COMPLETE 0x10 /* Region setup is complete */
99#define AOPOBJ_INVALID 0x20 /* Host OS won't allow a Region address */ 99#define AOPOBJ_INVALID 0x20 /* Host OS won't allow a Region address */
100#define AOPOBJ_MODULE_LEVEL 0x40 /* Method is actually module-level code */
101#define AOPOBJ_MODIFIED_NAMESPACE 0x80 /* Method modified the namespace */
102 100
103/****************************************************************************** 101/******************************************************************************
104 * 102 *
@@ -175,7 +173,7 @@ struct acpi_object_region {
175}; 173};
176 174
177struct acpi_object_method { 175struct acpi_object_method {
178 ACPI_OBJECT_COMMON_HEADER u8 method_flags; 176 ACPI_OBJECT_COMMON_HEADER u8 info_flags;
179 u8 param_count; 177 u8 param_count;
180 u8 sync_level; 178 u8 sync_level;
181 union acpi_operand_object *mutex; 179 union acpi_operand_object *mutex;
@@ -183,13 +181,21 @@ struct acpi_object_method {
183 union { 181 union {
184 ACPI_INTERNAL_METHOD implementation; 182 ACPI_INTERNAL_METHOD implementation;
185 union acpi_operand_object *handler; 183 union acpi_operand_object *handler;
186 } extra; 184 } dispatch;
187 185
188 u32 aml_length; 186 u32 aml_length;
189 u8 thread_count; 187 u8 thread_count;
190 acpi_owner_id owner_id; 188 acpi_owner_id owner_id;
191}; 189};
192 190
191/* Flags for info_flags field above */
192
193#define ACPI_METHOD_MODULE_LEVEL 0x01 /* Method is actually module-level code */
194#define ACPI_METHOD_INTERNAL_ONLY 0x02 /* Method is implemented internally (_OSI) */
195#define ACPI_METHOD_SERIALIZED 0x04 /* Method is serialized */
196#define ACPI_METHOD_SERIALIZED_PENDING 0x08 /* Method is to be marked serialized */
197#define ACPI_METHOD_MODIFIED_NAMESPACE 0x10 /* Method modified the namespace */
198
193/****************************************************************************** 199/******************************************************************************
194 * 200 *
195 * Objects that can be notified. All share a common notify_info area. 201 * Objects that can be notified. All share a common notify_info area.
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 8c15ff43f42b..bb2ccfad7376 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index d0bb0fd3e57a..5ea1e06afa20 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 10998d369ad0..94e73c97cf85 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 528bcbaf4ce7..f08b55b7f3a0 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 6e5dd97949fe..1623b245dde2 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 62a576e34361..967f08124eba 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 72e4183c1937..99c140d8e348 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1f484ba228fc..f4f0998d3967 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2010, Intel Corp. 10 * Copyright (C) 2000 - 2011, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -480,16 +480,10 @@ typedef enum {
480 AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D 480 AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
481} AML_ACCESS_ATTRIBUTE; 481} AML_ACCESS_ATTRIBUTE;
482 482
483/* Bit fields in method_flags byte */ 483/* Bit fields in the AML method_flags byte */
484 484
485#define AML_METHOD_ARG_COUNT 0x07 485#define AML_METHOD_ARG_COUNT 0x07
486#define AML_METHOD_SERIALIZED 0x08 486#define AML_METHOD_SERIALIZED 0x08
487#define AML_METHOD_SYNC_LEVEL 0xF0 487#define AML_METHOD_SYNC_LEVEL 0xF0
488 488
489/* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
490
491#define AML_METHOD_INTERNAL_ONLY 0x01
492#define AML_METHOD_RESERVED1 0x02
493#define AML_METHOD_RESERVED2 0x04
494
495#endif /* __AMLCODE_H__ */ 489#endif /* __AMLCODE_H__ */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 0e5798fcbb19..59122cde247c 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 347bee1726f1..34be60c0e448 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index cc4a38c57558..a7718bf2b9a1 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index d94dd8974b55..5d797751e205 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,6 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h" 45#include "accommon.h"
46#include "amlcode.h"
47#include "acdispat.h" 46#include "acdispat.h"
48#include "acinterp.h" 47#include "acinterp.h"
49#include "acnamesp.h" 48#include "acnamesp.h"
@@ -201,7 +200,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
201 /* 200 /*
202 * If this method is serialized, we need to acquire the method mutex. 201 * If this method is serialized, we need to acquire the method mutex.
203 */ 202 */
204 if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) { 203 if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
205 /* 204 /*
206 * Create a mutex for the method if it is defined to be Serialized 205 * Create a mutex for the method if it is defined to be Serialized
207 * and a mutex has not already been created. We defer the mutex creation 206 * and a mutex has not already been created. We defer the mutex creation
@@ -413,8 +412,9 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
413 412
414 /* Invoke an internal method if necessary */ 413 /* Invoke an internal method if necessary */
415 414
416 if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) { 415 if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
417 status = obj_desc->method.extra.implementation(next_walk_state); 416 status =
417 obj_desc->method.dispatch.implementation(next_walk_state);
418 if (status == AE_OK) { 418 if (status == AE_OK) {
419 status = AE_CTRL_TERMINATE; 419 status = AE_CTRL_TERMINATE;
420 } 420 }
@@ -579,11 +579,14 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
579 579
580 /* 580 /*
581 * Delete any namespace objects created anywhere within the 581 * Delete any namespace objects created anywhere within the
582 * namespace by the execution of this method. Unless this method 582 * namespace by the execution of this method. Unless:
583 * is a module-level executable code method, in which case we 583 * 1) This method is a module-level executable code method, in which
584 * want make the objects permanent. 584 * case we want make the objects permanent.
585 * 2) There are other threads executing the method, in which case we
586 * will wait until the last thread has completed.
585 */ 587 */
586 if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) { 588 if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
589 && (method_desc->method.thread_count == 1)) {
587 590
588 /* Delete any direct children of (created by) this method */ 591 /* Delete any direct children of (created by) this method */
589 592
@@ -593,12 +596,17 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
593 /* 596 /*
594 * Delete any objects that were created by this method 597 * Delete any objects that were created by this method
595 * elsewhere in the namespace (if any were created). 598 * elsewhere in the namespace (if any were created).
599 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
600 * deletion such that we don't have to perform an entire
601 * namespace walk for every control method execution.
596 */ 602 */
597 if (method_desc->method. 603 if (method_desc->method.
598 flags & AOPOBJ_MODIFIED_NAMESPACE) { 604 info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
599 acpi_ns_delete_namespace_by_owner(method_desc-> 605 acpi_ns_delete_namespace_by_owner(method_desc->
600 method. 606 method.
601 owner_id); 607 owner_id);
608 method_desc->method.info_flags &=
609 ~ACPI_METHOD_MODIFIED_NAMESPACE;
602 } 610 }
603 } 611 }
604 } 612 }
@@ -629,19 +637,43 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
629 * Serialized if it appears that the method is incorrectly written and 637 * Serialized if it appears that the method is incorrectly written and
630 * does not support multiple thread execution. The best example of this 638 * does not support multiple thread execution. The best example of this
631 * is if such a method creates namespace objects and blocks. A second 639 * is if such a method creates namespace objects and blocks. A second
632 * thread will fail with an AE_ALREADY_EXISTS exception 640 * thread will fail with an AE_ALREADY_EXISTS exception.
633 * 641 *
634 * This code is here because we must wait until the last thread exits 642 * This code is here because we must wait until the last thread exits
635 * before creating the synchronization semaphore. 643 * before marking the method as serialized.
636 */ 644 */
637 if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED) 645 if (method_desc->method.
638 && (!method_desc->method.mutex)) { 646 info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
639 (void)acpi_ds_create_method_mutex(method_desc); 647 if (walk_state) {
648 ACPI_INFO((AE_INFO,
649 "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
650 walk_state->method_node->name.
651 ascii));
652 }
653
654 /*
655 * Method tried to create an object twice and was marked as
656 * "pending serialized". The probable cause is that the method
657 * cannot handle reentrancy.
658 *
659 * The method was created as not_serialized, but it tried to create
660 * a named object and then blocked, causing the second thread
661 * entrance to begin and then fail. Workaround this problem by
662 * marking the method permanently as Serialized when the last
663 * thread exits here.
664 */
665 method_desc->method.info_flags &=
666 ~ACPI_METHOD_SERIALIZED_PENDING;
667 method_desc->method.info_flags |=
668 ACPI_METHOD_SERIALIZED;
669 method_desc->method.sync_level = 0;
640 } 670 }
641 671
642 /* No more threads, we can free the owner_id */ 672 /* No more threads, we can free the owner_id */
643 673
644 if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) { 674 if (!
675 (method_desc->method.
676 info_flags & ACPI_METHOD_MODULE_LEVEL)) {
645 acpi_ut_release_owner_id(&method_desc->method.owner_id); 677 acpi_ut_release_owner_id(&method_desc->method.owner_id);
646 } 678 }
647 } 679 }
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 8095306fcd8c..905ce29a92e1 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 8e85f54a8e0e..f42e17e5c252 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 7c0e74227171..bbecf293aeeb 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 15135c25aa9b..2c477ce172fa 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b0b5d08d97a..fe40e4c6554f 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 140a9d002959..52566ff5e903 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index d1e701709dac..76a661fc1e09 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 83155dd8671e..a6c374ef9914 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c61c3039c31a..d458b041e651 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -217,9 +217,17 @@ u32 acpi_ev_fixed_event_detect(void)
217 status_bit_mask) 217 status_bit_mask)
218 && (fixed_enable & acpi_gbl_fixed_event_info[i]. 218 && (fixed_enable & acpi_gbl_fixed_event_info[i].
219 enable_bit_mask)) { 219 enable_bit_mask)) {
220 /*
221 * Found an active (signalled) event. Invoke global event
222 * handler if present.
223 */
224 acpi_fixed_event_count[i]++;
225 if (acpi_gbl_global_event_handler) {
226 acpi_gbl_global_event_handler
227 (ACPI_EVENT_TYPE_FIXED, NULL, i,
228 acpi_gbl_global_event_handler_context);
229 }
220 230
221 /* Found an active (signalled) event */
222 acpi_os_fixed_event_count(i);
223 int_status |= acpi_ev_fixed_event_dispatch(i); 231 int_status |= acpi_ev_fixed_event_dispatch(i);
224 } 232 }
225 } 233 }
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index f226eac314db..14988a86066f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -52,6 +52,8 @@ ACPI_MODULE_NAME("evgpe")
52/* Local prototypes */ 52/* Local prototypes */
53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); 53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
54 54
55static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
56
55/******************************************************************************* 57/*******************************************************************************
56 * 58 *
57 * FUNCTION: acpi_ev_update_gpe_enable_mask 59 * FUNCTION: acpi_ev_update_gpe_enable_mask
@@ -102,7 +104,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
102 * 104 *
103 * RETURN: Status 105 * RETURN: Status
104 * 106 *
105 * DESCRIPTION: Clear the given GPE from stale events and enable it. 107 * DESCRIPTION: Clear a GPE of stale events and enable it.
106 * 108 *
107 ******************************************************************************/ 109 ******************************************************************************/
108acpi_status 110acpi_status
@@ -113,12 +115,13 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
113 ACPI_FUNCTION_TRACE(ev_enable_gpe); 115 ACPI_FUNCTION_TRACE(ev_enable_gpe);
114 116
115 /* 117 /*
116 * We will only allow a GPE to be enabled if it has either an 118 * We will only allow a GPE to be enabled if it has either an associated
117 * associated method (_Lxx/_Exx) or a handler. Otherwise, the 119 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
118 * GPE will be immediately disabled by acpi_ev_gpe_dispatch the 120 * feature. Otherwise, the GPE will be immediately disabled by
119 * first time it fires. 121 * acpi_ev_gpe_dispatch the first time it fires.
120 */ 122 */
121 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { 123 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
124 ACPI_GPE_DISPATCH_NONE) {
122 return_ACPI_STATUS(AE_NO_HANDLER); 125 return_ACPI_STATUS(AE_NO_HANDLER);
123 } 126 }
124 127
@@ -137,9 +140,9 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
137 140
138/******************************************************************************* 141/*******************************************************************************
139 * 142 *
140 * FUNCTION: acpi_raw_enable_gpe 143 * FUNCTION: acpi_ev_add_gpe_reference
141 * 144 *
142 * PARAMETERS: gpe_event_info - GPE to enable 145 * PARAMETERS: gpe_event_info - Add a reference to this GPE
143 * 146 *
144 * RETURN: Status 147 * RETURN: Status
145 * 148 *
@@ -148,16 +151,21 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
148 * 151 *
149 ******************************************************************************/ 152 ******************************************************************************/
150 153
151acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 154acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
152{ 155{
153 acpi_status status = AE_OK; 156 acpi_status status = AE_OK;
154 157
158 ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
159
155 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { 160 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
156 return_ACPI_STATUS(AE_LIMIT); 161 return_ACPI_STATUS(AE_LIMIT);
157 } 162 }
158 163
159 gpe_event_info->runtime_count++; 164 gpe_event_info->runtime_count++;
160 if (gpe_event_info->runtime_count == 1) { 165 if (gpe_event_info->runtime_count == 1) {
166
167 /* Enable on first reference */
168
161 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 169 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
162 if (ACPI_SUCCESS(status)) { 170 if (ACPI_SUCCESS(status)) {
163 status = acpi_ev_enable_gpe(gpe_event_info); 171 status = acpi_ev_enable_gpe(gpe_event_info);
@@ -173,9 +181,9 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
173 181
174/******************************************************************************* 182/*******************************************************************************
175 * 183 *
176 * FUNCTION: acpi_raw_disable_gpe 184 * FUNCTION: acpi_ev_remove_gpe_reference
177 * 185 *
178 * PARAMETERS: gpe_event_info - GPE to disable 186 * PARAMETERS: gpe_event_info - Remove a reference to this GPE
179 * 187 *
180 * RETURN: Status 188 * RETURN: Status
181 * 189 *
@@ -184,16 +192,21 @@ acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
184 * 192 *
185 ******************************************************************************/ 193 ******************************************************************************/
186 194
187acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) 195acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
188{ 196{
189 acpi_status status = AE_OK; 197 acpi_status status = AE_OK;
190 198
199 ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
200
191 if (!gpe_event_info->runtime_count) { 201 if (!gpe_event_info->runtime_count) {
192 return_ACPI_STATUS(AE_LIMIT); 202 return_ACPI_STATUS(AE_LIMIT);
193 } 203 }
194 204
195 gpe_event_info->runtime_count--; 205 gpe_event_info->runtime_count--;
196 if (!gpe_event_info->runtime_count) { 206 if (!gpe_event_info->runtime_count) {
207
208 /* Disable on last reference */
209
197 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 210 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
198 if (ACPI_SUCCESS(status)) { 211 if (ACPI_SUCCESS(status)) {
199 status = acpi_hw_low_set_gpe(gpe_event_info, 212 status = acpi_hw_low_set_gpe(gpe_event_info,
@@ -379,7 +392,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
379 } 392 }
380 393
381 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, 394 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
382 "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n", 395 "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
383 gpe_register_info->base_gpe_number, 396 gpe_register_info->base_gpe_number,
384 status_reg, enable_reg)); 397 status_reg, enable_reg));
385 398
@@ -405,7 +418,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
405 * or method. 418 * or method.
406 */ 419 */
407 int_status |= 420 int_status |=
408 acpi_ev_gpe_dispatch(&gpe_block-> 421 acpi_ev_gpe_dispatch(gpe_block->
422 node,
423 &gpe_block->
409 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); 424 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
410 } 425 }
411 } 426 }
@@ -435,19 +450,28 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
435 * an interrupt handler. 450 * an interrupt handler.
436 * 451 *
437 ******************************************************************************/ 452 ******************************************************************************/
438static void acpi_ev_asynch_enable_gpe(void *context);
439 453
440static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) 454static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
441{ 455{
442 struct acpi_gpe_event_info *gpe_event_info = (void *)context; 456 struct acpi_gpe_event_info *gpe_event_info = context;
443 acpi_status status; 457 acpi_status status;
444 struct acpi_gpe_event_info local_gpe_event_info; 458 struct acpi_gpe_event_info *local_gpe_event_info;
445 struct acpi_evaluate_info *info; 459 struct acpi_evaluate_info *info;
446 460
447 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 461 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
448 462
463 /* Allocate a local GPE block */
464
465 local_gpe_event_info =
466 ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
467 if (!local_gpe_event_info) {
468 ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
469 return_VOID;
470 }
471
449 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 472 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
450 if (ACPI_FAILURE(status)) { 473 if (ACPI_FAILURE(status)) {
474 ACPI_FREE(local_gpe_event_info);
451 return_VOID; 475 return_VOID;
452 } 476 }
453 477
@@ -455,6 +479,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
455 479
456 if (!acpi_ev_valid_gpe_event(gpe_event_info)) { 480 if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
457 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); 481 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
482 ACPI_FREE(local_gpe_event_info);
458 return_VOID; 483 return_VOID;
459 } 484 }
460 485
@@ -462,7 +487,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
462 * Take a snapshot of the GPE info for this level - we copy the info to 487 * Take a snapshot of the GPE info for this level - we copy the info to
463 * prevent a race condition with remove_handler/remove_block. 488 * prevent a race condition with remove_handler/remove_block.
464 */ 489 */
465 ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, 490 ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
466 sizeof(struct acpi_gpe_event_info)); 491 sizeof(struct acpi_gpe_event_info));
467 492
468 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); 493 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -470,12 +495,26 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
470 return_VOID; 495 return_VOID;
471 } 496 }
472 497
473 /* 498 /* Do the correct dispatch - normal method or implicit notify */
474 * Must check for control method type dispatch one more time to avoid a 499
475 * race with ev_gpe_install_handler 500 switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
476 */ 501 case ACPI_GPE_DISPATCH_NOTIFY:
477 if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == 502
478 ACPI_GPE_DISPATCH_METHOD) { 503 /*
504 * Implicit notify.
505 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
506 * NOTE: the request is queued for execution after this method
507 * completes. The notify handlers are NOT invoked synchronously
508 * from this thread -- because handlers may in turn run other
509 * control methods.
510 */
511 status =
512 acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
513 device_node,
514 ACPI_NOTIFY_DEVICE_WAKE);
515 break;
516
517 case ACPI_GPE_DISPATCH_METHOD:
479 518
480 /* Allocate the evaluation information block */ 519 /* Allocate the evaluation information block */
481 520
@@ -488,7 +527,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
488 * control method that corresponds to this GPE 527 * control method that corresponds to this GPE
489 */ 528 */
490 info->prefix_node = 529 info->prefix_node =
491 local_gpe_event_info.dispatch.method_node; 530 local_gpe_event_info->dispatch.method_node;
492 info->flags = ACPI_IGNORE_RETURN_VALUE; 531 info->flags = ACPI_IGNORE_RETURN_VALUE;
493 532
494 status = acpi_ns_evaluate(info); 533 status = acpi_ns_evaluate(info);
@@ -499,46 +538,98 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
499 ACPI_EXCEPTION((AE_INFO, status, 538 ACPI_EXCEPTION((AE_INFO, status,
500 "while evaluating GPE method [%4.4s]", 539 "while evaluating GPE method [%4.4s]",
501 acpi_ut_get_node_name 540 acpi_ut_get_node_name
502 (local_gpe_event_info.dispatch. 541 (local_gpe_event_info->dispatch.
503 method_node))); 542 method_node)));
504 } 543 }
544
545 break;
546
547 default:
548 return_VOID; /* Should never happen */
505 } 549 }
550
506 /* Defer enabling of GPE until all notify handlers are done */ 551 /* Defer enabling of GPE until all notify handlers are done */
507 acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, 552
508 gpe_event_info); 553 status = acpi_os_execute(OSL_NOTIFY_HANDLER,
554 acpi_ev_asynch_enable_gpe,
555 local_gpe_event_info);
556 if (ACPI_FAILURE(status)) {
557 ACPI_FREE(local_gpe_event_info);
558 }
509 return_VOID; 559 return_VOID;
510} 560}
511 561
512static void acpi_ev_asynch_enable_gpe(void *context) 562
563/*******************************************************************************
564 *
565 * FUNCTION: acpi_ev_asynch_enable_gpe
566 *
567 * PARAMETERS: Context (gpe_event_info) - Info for this GPE
568 * Callback from acpi_os_execute
569 *
570 * RETURN: None
571 *
572 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
573 * complete (i.e., finish execution of Notify)
574 *
575 ******************************************************************************/
576
577static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
513{ 578{
514 struct acpi_gpe_event_info *gpe_event_info = context; 579 struct acpi_gpe_event_info *gpe_event_info = context;
580
581 (void)acpi_ev_finish_gpe(gpe_event_info);
582
583 ACPI_FREE(gpe_event_info);
584 return;
585}
586
587
588/*******************************************************************************
589 *
590 * FUNCTION: acpi_ev_finish_gpe
591 *
592 * PARAMETERS: gpe_event_info - Info for this GPE
593 *
594 * RETURN: Status
595 *
596 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
597 * of a GPE method or a synchronous or asynchronous GPE handler.
598 *
599 ******************************************************************************/
600
601acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
602{
515 acpi_status status; 603 acpi_status status;
604
516 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 605 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
517 ACPI_GPE_LEVEL_TRIGGERED) { 606 ACPI_GPE_LEVEL_TRIGGERED) {
518 /* 607 /*
519 * GPE is level-triggered, we clear the GPE status bit after handling 608 * GPE is level-triggered, we clear the GPE status bit after
520 * the event. 609 * handling the event.
521 */ 610 */
522 status = acpi_hw_clear_gpe(gpe_event_info); 611 status = acpi_hw_clear_gpe(gpe_event_info);
523 if (ACPI_FAILURE(status)) { 612 if (ACPI_FAILURE(status)) {
524 return_VOID; 613 return (status);
525 } 614 }
526 } 615 }
527 616
528 /* 617 /*
529 * Enable this GPE, conditionally. This means that the GPE will only be 618 * Enable this GPE, conditionally. This means that the GPE will
530 * physically enabled if the enable_for_run bit is set in the event_info 619 * only be physically enabled if the enable_for_run bit is set
620 * in the event_info.
531 */ 621 */
532 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE); 622 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
533 623 return (AE_OK);
534 return_VOID;
535} 624}
536 625
626
537/******************************************************************************* 627/*******************************************************************************
538 * 628 *
539 * FUNCTION: acpi_ev_gpe_dispatch 629 * FUNCTION: acpi_ev_gpe_dispatch
540 * 630 *
541 * PARAMETERS: gpe_event_info - Info for this GPE 631 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
632 * gpe_event_info - Info for this GPE
542 * gpe_number - Number relative to the parent GPE block 633 * gpe_number - Number relative to the parent GPE block
543 * 634 *
544 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 635 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
@@ -551,13 +642,22 @@ static void acpi_ev_asynch_enable_gpe(void *context)
551 ******************************************************************************/ 642 ******************************************************************************/
552 643
553u32 644u32
554acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) 645acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
646 struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
555{ 647{
556 acpi_status status; 648 acpi_status status;
649 u32 return_value;
557 650
558 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 651 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
559 652
560 acpi_os_gpe_count(gpe_number); 653 /* Invoke global event handler if present */
654
655 acpi_gpe_count++;
656 if (acpi_gbl_global_event_handler) {
657 acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
658 gpe_number,
659 acpi_gbl_global_event_handler_context);
660 }
561 661
562 /* 662 /*
563 * If edge-triggered, clear the GPE status bit now. Note that 663 * If edge-triggered, clear the GPE status bit now. Note that
@@ -568,59 +668,55 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
568 status = acpi_hw_clear_gpe(gpe_event_info); 668 status = acpi_hw_clear_gpe(gpe_event_info);
569 if (ACPI_FAILURE(status)) { 669 if (ACPI_FAILURE(status)) {
570 ACPI_EXCEPTION((AE_INFO, status, 670 ACPI_EXCEPTION((AE_INFO, status,
571 "Unable to clear GPE[0x%2X]", 671 "Unable to clear GPE%02X", gpe_number));
572 gpe_number));
573 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 672 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
574 } 673 }
575 } 674 }
576 675
577 /* 676 /*
578 * Dispatch the GPE to either an installed handler, or the control method 677 * Always disable the GPE so that it does not keep firing before
579 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke 678 * any asynchronous activity completes (either from the execution
580 * it and do not attempt to run the method. If there is neither a handler 679 * of a GPE method or an asynchronous GPE handler.)
581 * nor a method, we disable this GPE to prevent further such pointless 680 *
582 * events from firing. 681 * If there is no handler or method to run, just disable the
682 * GPE and leave it disabled permanently to prevent further such
683 * pointless events from firing.
684 */
685 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
686 if (ACPI_FAILURE(status)) {
687 ACPI_EXCEPTION((AE_INFO, status,
688 "Unable to disable GPE%02X", gpe_number));
689 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
690 }
691
692 /*
693 * Dispatch the GPE to either an installed handler or the control
694 * method associated with this GPE (_Lxx or _Exx). If a handler
695 * exists, we invoke it and do not attempt to run the method.
696 * If there is neither a handler nor a method, leave the GPE
697 * disabled.
583 */ 698 */
584 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 699 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
585 case ACPI_GPE_DISPATCH_HANDLER: 700 case ACPI_GPE_DISPATCH_HANDLER:
586 701
587 /* 702 /* Invoke the installed handler (at interrupt level) */
588 * Invoke the installed handler (at interrupt level)
589 * Ignore return status for now.
590 * TBD: leave GPE disabled on error?
591 */
592 (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
593 dispatch.
594 handler->
595 context);
596 703
597 /* It is now safe to clear level-triggered events. */ 704 return_value =
705 gpe_event_info->dispatch.handler->address(gpe_device,
706 gpe_number,
707 gpe_event_info->
708 dispatch.handler->
709 context);
598 710
599 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 711 /* If requested, clear (if level-triggered) and reenable the GPE */
600 ACPI_GPE_LEVEL_TRIGGERED) { 712
601 status = acpi_hw_clear_gpe(gpe_event_info); 713 if (return_value & ACPI_REENABLE_GPE) {
602 if (ACPI_FAILURE(status)) { 714 (void)acpi_ev_finish_gpe(gpe_event_info);
603 ACPI_EXCEPTION((AE_INFO, status,
604 "Unable to clear GPE[0x%2X]",
605 gpe_number));
606 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
607 }
608 } 715 }
609 break; 716 break;
610 717
611 case ACPI_GPE_DISPATCH_METHOD: 718 case ACPI_GPE_DISPATCH_METHOD:
612 719 case ACPI_GPE_DISPATCH_NOTIFY:
613 /*
614 * Disable the GPE, so it doesn't keep firing before the method has a
615 * chance to run (it runs asynchronously with interrupts enabled).
616 */
617 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
618 if (ACPI_FAILURE(status)) {
619 ACPI_EXCEPTION((AE_INFO, status,
620 "Unable to disable GPE[0x%2X]",
621 gpe_number));
622 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
623 }
624 720
625 /* 721 /*
626 * Execute the method associated with the GPE 722 * Execute the method associated with the GPE
@@ -631,7 +727,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
631 gpe_event_info); 727 gpe_event_info);
632 if (ACPI_FAILURE(status)) { 728 if (ACPI_FAILURE(status)) {
633 ACPI_EXCEPTION((AE_INFO, status, 729 ACPI_EXCEPTION((AE_INFO, status,
634 "Unable to queue handler for GPE[0x%2X] - event disabled", 730 "Unable to queue handler for GPE%2X - event disabled",
635 gpe_number)); 731 gpe_number));
636 } 732 }
637 break; 733 break;
@@ -644,20 +740,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
644 * a GPE to be enabled if it has no handler or method. 740 * a GPE to be enabled if it has no handler or method.
645 */ 741 */
646 ACPI_ERROR((AE_INFO, 742 ACPI_ERROR((AE_INFO,
647 "No handler or method for GPE[0x%2X], disabling event", 743 "No handler or method for GPE%02X, disabling event",
648 gpe_number)); 744 gpe_number));
649 745
650 /*
651 * Disable the GPE. The GPE will remain disabled a handler
652 * is installed or ACPICA is restarted.
653 */
654 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
655 if (ACPI_FAILURE(status)) {
656 ACPI_EXCEPTION((AE_INFO, status,
657 "Unable to disable GPE[0x%2X]",
658 gpe_number));
659 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
660 }
661 break; 746 break;
662 } 747 }
663 748
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 020add3eee1c..ca2c41a53311 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -361,9 +361,9 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
361 361
362 gpe_block->node = gpe_device; 362 gpe_block->node = gpe_device;
363 gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); 363 gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
364 gpe_block->initialized = FALSE;
364 gpe_block->register_count = register_count; 365 gpe_block->register_count = register_count;
365 gpe_block->block_base_number = gpe_block_base_number; 366 gpe_block->block_base_number = gpe_block_base_number;
366 gpe_block->initialized = FALSE;
367 367
368 ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, 368 ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
369 sizeof(struct acpi_generic_address)); 369 sizeof(struct acpi_generic_address));
@@ -386,7 +386,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
386 return_ACPI_STATUS(status); 386 return_ACPI_STATUS(status);
387 } 387 }
388 388
389 acpi_all_gpes_initialized = FALSE; 389 acpi_gbl_all_gpes_initialized = FALSE;
390 390
391 /* Find all GPE methods (_Lxx or_Exx) for this block */ 391 /* Find all GPE methods (_Lxx or_Exx) for this block */
392 392
@@ -423,14 +423,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
423 * 423 *
424 * FUNCTION: acpi_ev_initialize_gpe_block 424 * FUNCTION: acpi_ev_initialize_gpe_block
425 * 425 *
426 * PARAMETERS: gpe_device - Handle to the parent GPE block 426 * PARAMETERS: acpi_gpe_callback
427 * gpe_block - Gpe Block info
428 * 427 *
429 * RETURN: Status 428 * RETURN: Status
430 * 429 *
431 * DESCRIPTION: Initialize and enable a GPE block. First find and run any 430 * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
432 * _PRT methods associated with the block, then enable the 431 * associated methods.
433 * appropriate GPEs.
434 * Note: Assumes namespace is locked. 432 * Note: Assumes namespace is locked.
435 * 433 *
436 ******************************************************************************/ 434 ******************************************************************************/
@@ -450,8 +448,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
450 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); 448 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
451 449
452 /* 450 /*
453 * Ignore a null GPE block (e.g., if no GPE block 1 exists) and 451 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
454 * GPE blocks that have been initialized already. 452 * any GPE blocks that have been initialized already.
455 */ 453 */
456 if (!gpe_block || gpe_block->initialized) { 454 if (!gpe_block || gpe_block->initialized) {
457 return_ACPI_STATUS(AE_OK); 455 return_ACPI_STATUS(AE_OK);
@@ -459,8 +457,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
459 457
460 /* 458 /*
461 * Enable all GPEs that have a corresponding method and have the 459 * Enable all GPEs that have a corresponding method and have the
462 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must 460 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
463 * be enabled via the acpi_enable_gpe() interface. 461 * must be enabled via the acpi_enable_gpe() interface.
464 */ 462 */
465 gpe_enabled_count = 0; 463 gpe_enabled_count = 0;
466 464
@@ -472,14 +470,19 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
472 gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; 470 gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
473 gpe_event_info = &gpe_block->event_info[gpe_index]; 471 gpe_event_info = &gpe_block->event_info[gpe_index];
474 472
475 /* Ignore GPEs that have no corresponding _Lxx/_Exx method */ 473 /*
476 474 * Ignore GPEs that have no corresponding _Lxx/_Exx method
477 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) 475 * and GPEs that are used to wake the system
476 */
477 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
478 ACPI_GPE_DISPATCH_NONE)
479 || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
480 == ACPI_GPE_DISPATCH_HANDLER)
478 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 481 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
479 continue; 482 continue;
480 } 483 }
481 484
482 status = acpi_raw_enable_gpe(gpe_event_info); 485 status = acpi_ev_add_gpe_reference(gpe_event_info);
483 if (ACPI_FAILURE(status)) { 486 if (ACPI_FAILURE(status)) {
484 ACPI_EXCEPTION((AE_INFO, status, 487 ACPI_EXCEPTION((AE_INFO, status,
485 "Could not enable GPE 0x%02X", 488 "Could not enable GPE 0x%02X",
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 4c8dea513b66..ce9aa9f9a972 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -45,11 +45,27 @@
45#include "accommon.h" 45#include "accommon.h"
46#include "acevents.h" 46#include "acevents.h"
47#include "acnamesp.h" 47#include "acnamesp.h"
48#include "acinterp.h"
49 48
50#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evgpeinit") 50ACPI_MODULE_NAME("evgpeinit")
52 51
52/*
53 * Note: History of _PRW support in ACPICA
54 *
55 * Originally (2000 - 2010), the GPE initialization code performed a walk of
56 * the entire namespace to execute the _PRW methods and detect all GPEs
57 * capable of waking the system.
58 *
59 * As of 10/2010, the _PRW method execution has been removed since it is
60 * actually unnecessary. The host OS must in fact execute all _PRW methods
61 * in order to identify the device/power-resource dependencies. We now put
62 * the onus on the host OS to identify the wake GPEs as part of this process
63 * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This
64 * not only reduces the complexity of the ACPICA initialization code, but in
65 * some cases (on systems with very large namespaces) it should reduce the
66 * kernel boot time as well.
67 */
68
53/******************************************************************************* 69/*******************************************************************************
54 * 70 *
55 * FUNCTION: acpi_ev_gpe_initialize 71 * FUNCTION: acpi_ev_gpe_initialize
@@ -222,7 +238,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
222 acpi_status status = AE_OK; 238 acpi_status status = AE_OK;
223 239
224 /* 240 /*
225 * 2) Find any _Lxx/_Exx GPE methods that have just been loaded. 241 * Find any _Lxx/_Exx GPE methods that have just been loaded.
226 * 242 *
227 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately 243 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
228 * enabled. 244 * enabled.
@@ -235,9 +251,9 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
235 return; 251 return;
236 } 252 }
237 253
254 walk_info.count = 0;
238 walk_info.owner_id = table_owner_id; 255 walk_info.owner_id = table_owner_id;
239 walk_info.execute_by_owner_id = TRUE; 256 walk_info.execute_by_owner_id = TRUE;
240 walk_info.count = 0;
241 257
242 /* Walk the interrupt level descriptor list */ 258 /* Walk the interrupt level descriptor list */
243 259
@@ -298,7 +314,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
298 * xx - is the GPE number [in HEX] 314 * xx - is the GPE number [in HEX]
299 * 315 *
300 * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods 316 * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods
301 * with that owner. 317 * with that owner.
302 * 318 *
303 ******************************************************************************/ 319 ******************************************************************************/
304 320
@@ -415,6 +431,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
415 * Add the GPE information from above to the gpe_event_info block for 431 * Add the GPE information from above to the gpe_event_info block for
416 * use during dispatch of this GPE. 432 * use during dispatch of this GPE.
417 */ 433 */
434 gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
418 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); 435 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
419 gpe_event_info->dispatch.method_node = method_node; 436 gpe_event_info->dispatch.method_node = method_node;
420 437
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 19a0e513ea48..80a81d0c4a80 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -154,6 +154,45 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
154 154
155/******************************************************************************* 155/*******************************************************************************
156 * 156 *
157 * FUNCTION: acpi_ev_get_gpe_device
158 *
159 * PARAMETERS: GPE_WALK_CALLBACK
160 *
161 * RETURN: Status
162 *
163 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
164 * block device. NULL if the GPE is one of the FADT-defined GPEs.
165 *
166 ******************************************************************************/
167
168acpi_status
169acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
170 struct acpi_gpe_block_info *gpe_block, void *context)
171{
172 struct acpi_gpe_device_info *info = context;
173
174 /* Increment Index by the number of GPEs in this block */
175
176 info->next_block_base_index += gpe_block->gpe_count;
177
178 if (info->index < info->next_block_base_index) {
179 /*
180 * The GPE index is within this block, get the node. Leave the node
181 * NULL for the FADT-defined GPEs
182 */
183 if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
184 info->gpe_device = gpe_block->node;
185 }
186
187 info->status = AE_OK;
188 return (AE_CTRL_END);
189 }
190
191 return (AE_OK);
192}
193
194/*******************************************************************************
195 *
157 * FUNCTION: acpi_ev_get_gpe_xrupt_block 196 * FUNCTION: acpi_ev_get_gpe_xrupt_block
158 * 197 *
159 * PARAMETERS: interrupt_number - Interrupt for a GPE block 198 * PARAMETERS: interrupt_number - Interrupt for a GPE block
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index fcaed9fb44ff..7dc80946f7bd 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -284,41 +284,39 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
284 * RETURN: ACPI_INTERRUPT_HANDLED 284 * RETURN: ACPI_INTERRUPT_HANDLED
285 * 285 *
286 * DESCRIPTION: Invoked directly from the SCI handler when a global lock 286 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
287 * release interrupt occurs. Attempt to acquire the global lock, 287 * release interrupt occurs. If there's a thread waiting for
288 * if successful, signal the thread waiting for the lock. 288 * the global lock, signal it.
289 * 289 *
290 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If 290 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
291 * this is not possible for some reason, a separate thread will have to be 291 * this is not possible for some reason, a separate thread will have to be
292 * scheduled to do this. 292 * scheduled to do this.
293 * 293 *
294 ******************************************************************************/ 294 ******************************************************************************/
295static u8 acpi_ev_global_lock_pending;
295 296
296static u32 acpi_ev_global_lock_handler(void *context) 297static u32 acpi_ev_global_lock_handler(void *context)
297{ 298{
298 u8 acquired = FALSE; 299 acpi_status status;
300 acpi_cpu_flags flags;
299 301
300 /* 302 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
301 * Attempt to get the lock.
302 *
303 * If we don't get it now, it will be marked pending and we will
304 * take another interrupt when it becomes free.
305 */
306 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
307 if (acquired) {
308 303
309 /* Got the lock, now wake all threads waiting for it */ 304 if (!acpi_ev_global_lock_pending) {
305 goto out;
306 }
310 307
311 acpi_gbl_global_lock_acquired = TRUE; 308 /* Send a unit to the semaphore */
312 /* Send a unit to the semaphore */
313 309
314 if (ACPI_FAILURE 310 status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
315 (acpi_os_signal_semaphore 311 if (ACPI_FAILURE(status)) {
316 (acpi_gbl_global_lock_semaphore, 1))) { 312 ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
317 ACPI_ERROR((AE_INFO,
318 "Could not signal Global Lock semaphore"));
319 }
320 } 313 }
321 314
315 acpi_ev_global_lock_pending = FALSE;
316
317 out:
318 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
319
322 return (ACPI_INTERRUPT_HANDLED); 320 return (ACPI_INTERRUPT_HANDLED);
323} 321}
324 322
@@ -415,6 +413,7 @@ static int acpi_ev_global_lock_acquired;
415 413
416acpi_status acpi_ev_acquire_global_lock(u16 timeout) 414acpi_status acpi_ev_acquire_global_lock(u16 timeout)
417{ 415{
416 acpi_cpu_flags flags;
418 acpi_status status = AE_OK; 417 acpi_status status = AE_OK;
419 u8 acquired = FALSE; 418 u8 acquired = FALSE;
420 419
@@ -467,32 +466,47 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
467 return_ACPI_STATUS(AE_OK); 466 return_ACPI_STATUS(AE_OK);
468 } 467 }
469 468
470 /* Attempt to acquire the actual hardware lock */ 469 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
470
471 do {
472
473 /* Attempt to acquire the actual hardware lock */
474
475 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
476 if (acquired) {
477 acpi_gbl_global_lock_acquired = TRUE;
478
479 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
480 "Acquired hardware Global Lock\n"));
481 break;
482 }
471 483
472 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); 484 acpi_ev_global_lock_pending = TRUE;
473 if (acquired) {
474 485
475 /* We got the lock */ 486 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
476 487
488 /*
489 * Did not get the lock. The pending bit was set above, and we
490 * must wait until we get the global lock released interrupt.
491 */
477 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 492 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
478 "Acquired hardware Global Lock\n")); 493 "Waiting for hardware Global Lock\n"));
479 494
480 acpi_gbl_global_lock_acquired = TRUE; 495 /*
481 return_ACPI_STATUS(AE_OK); 496 * Wait for handshake with the global lock interrupt handler.
482 } 497 * This interface releases the interpreter if we must wait.
498 */
499 status = acpi_ex_system_wait_semaphore(
500 acpi_gbl_global_lock_semaphore,
501 ACPI_WAIT_FOREVER);
483 502
484 /* 503 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
485 * Did not get the lock. The pending bit was set above, and we must now
486 * wait until we get the global lock released interrupt.
487 */
488 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
489 504
490 /* 505 } while (ACPI_SUCCESS(status));
491 * Wait for handshake with the global lock interrupt handler. 506
492 * This interface releases the interpreter if we must wait. 507 acpi_ev_global_lock_pending = FALSE;
493 */ 508
494 status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, 509 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
495 ACPI_WAIT_FOREVER);
496 510
497 return_ACPI_STATUS(status); 511 return_ACPI_STATUS(status);
498} 512}
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 98fd210e87b2..785a5ee64585 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 0b47a6dc9290..9659cee6093e 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -590,9 +590,9 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
590 * See acpi_ns_exec_module_code 590 * See acpi_ns_exec_module_code
591 */ 591 */
592 if (obj_desc->method. 592 if (obj_desc->method.
593 flags & AOPOBJ_MODULE_LEVEL) { 593 info_flags & ACPI_METHOD_MODULE_LEVEL) {
594 handler_obj = 594 handler_obj =
595 obj_desc->method.extra.handler; 595 obj_desc->method.dispatch.handler;
596 } 596 }
597 break; 597 break;
598 598
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 8dfbaa96e422..2ebd40e1a3ef 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 36af222cac65..e1141402dbed 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,57 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
92 92
93ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) 93ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
94#endif /* ACPI_FUTURE_USAGE */ 94#endif /* ACPI_FUTURE_USAGE */
95
96/*******************************************************************************
97 *
98 * FUNCTION: acpi_install_global_event_handler
99 *
100 * PARAMETERS: Handler - Pointer to the global event handler function
101 * Context - Value passed to the handler on each event
102 *
103 * RETURN: Status
104 *
105 * DESCRIPTION: Saves the pointer to the handler function. The global handler
106 * is invoked upon each incoming GPE and Fixed Event. It is
107 * invoked at interrupt level at the time of the event dispatch.
108 * Can be used to update event counters, etc.
109 *
110 ******************************************************************************/
111acpi_status
112acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
113{
114 acpi_status status;
115
116 ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
117
118 /* Parameter validation */
119
120 if (!handler) {
121 return_ACPI_STATUS(AE_BAD_PARAMETER);
122 }
123
124 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
125 if (ACPI_FAILURE(status)) {
126 return_ACPI_STATUS(status);
127 }
128
129 /* Don't allow two handlers. */
130
131 if (acpi_gbl_global_event_handler) {
132 status = AE_ALREADY_EXISTS;
133 goto cleanup;
134 }
135
136 acpi_gbl_global_event_handler = handler;
137 acpi_gbl_global_event_handler_context = context;
138
139 cleanup:
140 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
141 return_ACPI_STATUS(status);
142}
143
144ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
145
95/******************************************************************************* 146/*******************************************************************************
96 * 147 *
97 * FUNCTION: acpi_install_fixed_event_handler 148 * FUNCTION: acpi_install_fixed_event_handler
@@ -671,10 +722,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
671acpi_status 722acpi_status
672acpi_install_gpe_handler(acpi_handle gpe_device, 723acpi_install_gpe_handler(acpi_handle gpe_device,
673 u32 gpe_number, 724 u32 gpe_number,
674 u32 type, acpi_event_handler address, void *context) 725 u32 type, acpi_gpe_handler address, void *context)
675{ 726{
676 struct acpi_gpe_event_info *gpe_event_info; 727 struct acpi_gpe_event_info *gpe_event_info;
677 struct acpi_handler_info *handler; 728 struct acpi_gpe_handler_info *handler;
678 acpi_status status; 729 acpi_status status;
679 acpi_cpu_flags flags; 730 acpi_cpu_flags flags;
680 731
@@ -693,7 +744,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
693 744
694 /* Allocate memory for the handler object */ 745 /* Allocate memory for the handler object */
695 746
696 handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); 747 handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
697 if (!handler) { 748 if (!handler) {
698 status = AE_NO_MEMORY; 749 status = AE_NO_MEMORY;
699 goto unlock_and_exit; 750 goto unlock_and_exit;
@@ -722,7 +773,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
722 handler->address = address; 773 handler->address = address;
723 handler->context = context; 774 handler->context = context;
724 handler->method_node = gpe_event_info->dispatch.method_node; 775 handler->method_node = gpe_event_info->dispatch.method_node;
725 handler->orig_flags = gpe_event_info->flags & 776 handler->original_flags = gpe_event_info->flags &
726 (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 777 (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
727 778
728 /* 779 /*
@@ -731,10 +782,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
731 * disabled now to avoid spurious execution of the handler. 782 * disabled now to avoid spurious execution of the handler.
732 */ 783 */
733 784
734 if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) 785 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
735 && gpe_event_info->runtime_count) { 786 && gpe_event_info->runtime_count) {
736 handler->orig_enabled = 1; 787 handler->originally_enabled = 1;
737 (void)acpi_raw_disable_gpe(gpe_event_info); 788 (void)acpi_ev_remove_gpe_reference(gpe_event_info);
738 } 789 }
739 790
740 /* Install the handler */ 791 /* Install the handler */
@@ -777,10 +828,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
777 ******************************************************************************/ 828 ******************************************************************************/
778acpi_status 829acpi_status
779acpi_remove_gpe_handler(acpi_handle gpe_device, 830acpi_remove_gpe_handler(acpi_handle gpe_device,
780 u32 gpe_number, acpi_event_handler address) 831 u32 gpe_number, acpi_gpe_handler address)
781{ 832{
782 struct acpi_gpe_event_info *gpe_event_info; 833 struct acpi_gpe_event_info *gpe_event_info;
783 struct acpi_handler_info *handler; 834 struct acpi_gpe_handler_info *handler;
784 acpi_status status; 835 acpi_status status;
785 acpi_cpu_flags flags; 836 acpi_cpu_flags flags;
786 837
@@ -835,7 +886,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
835 gpe_event_info->dispatch.method_node = handler->method_node; 886 gpe_event_info->dispatch.method_node = handler->method_node;
836 gpe_event_info->flags &= 887 gpe_event_info->flags &=
837 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 888 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
838 gpe_event_info->flags |= handler->orig_flags; 889 gpe_event_info->flags |= handler->original_flags;
839 890
840 /* 891 /*
841 * If the GPE was previously associated with a method and it was 892 * If the GPE was previously associated with a method and it was
@@ -843,9 +894,9 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
843 * post-initialization configuration. 894 * post-initialization configuration.
844 */ 895 */
845 896
846 if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) 897 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
847 && handler->orig_enabled) 898 && handler->originally_enabled)
848 (void)acpi_raw_enable_gpe(gpe_event_info); 899 (void)acpi_ev_add_gpe_reference(gpe_event_info);
849 900
850 /* Now we can free the handler object */ 901 /* Now we can free the handler object */
851 902
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index a1dabe3fd8ae..c57b5c707a77 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -43,18 +43,11 @@
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include "accommon.h" 45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48#include "actables.h" 46#include "actables.h"
49 47
50#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evxfevnt") 49ACPI_MODULE_NAME("evxfevnt")
52 50
53/* Local prototypes */
54static acpi_status
55acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
56 struct acpi_gpe_block_info *gpe_block, void *context);
57
58/******************************************************************************* 51/*******************************************************************************
59 * 52 *
60 * FUNCTION: acpi_enable 53 * FUNCTION: acpi_enable
@@ -213,185 +206,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
213 206
214/******************************************************************************* 207/*******************************************************************************
215 * 208 *
216 * FUNCTION: acpi_gpe_wakeup
217 *
218 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
219 * gpe_number - GPE level within the GPE block
220 * Action - Enable or Disable
221 *
222 * RETURN: Status
223 *
224 * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit.
225 *
226 ******************************************************************************/
227acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action)
228{
229 acpi_status status = AE_OK;
230 struct acpi_gpe_event_info *gpe_event_info;
231 struct acpi_gpe_register_info *gpe_register_info;
232 acpi_cpu_flags flags;
233 u32 register_bit;
234
235 ACPI_FUNCTION_TRACE(acpi_gpe_wakeup);
236
237 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
238
239 /* Ensure that we have a valid GPE number */
240
241 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
242 if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
243 status = AE_BAD_PARAMETER;
244 goto unlock_and_exit;
245 }
246
247 gpe_register_info = gpe_event_info->register_info;
248 if (!gpe_register_info) {
249 status = AE_NOT_EXIST;
250 goto unlock_and_exit;
251 }
252
253 register_bit =
254 acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
255
256 /* Perform the action */
257
258 switch (action) {
259 case ACPI_GPE_ENABLE:
260 ACPI_SET_BIT(gpe_register_info->enable_for_wake,
261 (u8)register_bit);
262 break;
263
264 case ACPI_GPE_DISABLE:
265 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
266 (u8)register_bit);
267 break;
268
269 default:
270 ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
271 status = AE_BAD_PARAMETER;
272 break;
273 }
274
275unlock_and_exit:
276 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
277 return_ACPI_STATUS(status);
278}
279
280ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup)
281
282/*******************************************************************************
283 *
284 * FUNCTION: acpi_enable_gpe
285 *
286 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
287 * gpe_number - GPE level within the GPE block
288 *
289 * RETURN: Status
290 *
291 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
292 * hardware-enabled.
293 *
294 ******************************************************************************/
295acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
296{
297 acpi_status status = AE_BAD_PARAMETER;
298 struct acpi_gpe_event_info *gpe_event_info;
299 acpi_cpu_flags flags;
300
301 ACPI_FUNCTION_TRACE(acpi_enable_gpe);
302
303 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
304
305 /* Ensure that we have a valid GPE number */
306
307 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
308 if (gpe_event_info) {
309 status = acpi_raw_enable_gpe(gpe_event_info);
310 }
311
312 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
313 return_ACPI_STATUS(status);
314}
315ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
316
317/*******************************************************************************
318 *
319 * FUNCTION: acpi_disable_gpe
320 *
321 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
322 * gpe_number - GPE level within the GPE block
323 *
324 * RETURN: Status
325 *
326 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
327 * removed, only then is the GPE disabled (for runtime GPEs), or
328 * the GPE mask bit disabled (for wake GPEs)
329 *
330 ******************************************************************************/
331acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
332{
333 acpi_status status = AE_BAD_PARAMETER;
334 struct acpi_gpe_event_info *gpe_event_info;
335 acpi_cpu_flags flags;
336
337 ACPI_FUNCTION_TRACE(acpi_disable_gpe);
338
339 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
340
341 /* Ensure that we have a valid GPE number */
342
343 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
344 if (gpe_event_info) {
345 status = acpi_raw_disable_gpe(gpe_event_info) ;
346 }
347
348 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
349 return_ACPI_STATUS(status);
350}
351ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
352
353/*******************************************************************************
354 *
355 * FUNCTION: acpi_gpe_can_wake
356 *
357 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
358 * gpe_number - GPE level within the GPE block
359 *
360 * RETURN: Status
361 *
362 * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE
363 * has a corresponding method and is currently enabled, disable it
364 * (GPEs with corresponding methods are enabled unconditionally
365 * during initialization, but GPEs that can wake up are expected
366 * to be initially disabled).
367 *
368 ******************************************************************************/
369acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
370{
371 acpi_status status = AE_OK;
372 struct acpi_gpe_event_info *gpe_event_info;
373 acpi_cpu_flags flags;
374
375 ACPI_FUNCTION_TRACE(acpi_gpe_can_wake);
376
377 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
378
379 /* Ensure that we have a valid GPE number */
380
381 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
382 if (gpe_event_info) {
383 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
384 } else {
385 status = AE_BAD_PARAMETER;
386 }
387
388 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
389 return_ACPI_STATUS(status);
390}
391ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake)
392
393/*******************************************************************************
394 *
395 * FUNCTION: acpi_disable_event 209 * FUNCTION: acpi_disable_event
396 * 210 *
397 * PARAMETERS: Event - The fixed eventto be enabled 211 * PARAMETERS: Event - The fixed eventto be enabled
@@ -483,44 +297,6 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
483 297
484/******************************************************************************* 298/*******************************************************************************
485 * 299 *
486 * FUNCTION: acpi_clear_gpe
487 *
488 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
489 * gpe_number - GPE level within the GPE block
490 *
491 * RETURN: Status
492 *
493 * DESCRIPTION: Clear an ACPI event (general purpose)
494 *
495 ******************************************************************************/
496acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
497{
498 acpi_status status = AE_OK;
499 struct acpi_gpe_event_info *gpe_event_info;
500 acpi_cpu_flags flags;
501
502 ACPI_FUNCTION_TRACE(acpi_clear_gpe);
503
504 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
505
506 /* Ensure that we have a valid GPE number */
507
508 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
509 if (!gpe_event_info) {
510 status = AE_BAD_PARAMETER;
511 goto unlock_and_exit;
512 }
513
514 status = acpi_hw_clear_gpe(gpe_event_info);
515
516 unlock_and_exit:
517 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
518 return_ACPI_STATUS(status);
519}
520
521ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
522/*******************************************************************************
523 *
524 * FUNCTION: acpi_get_event_status 300 * FUNCTION: acpi_get_event_status
525 * 301 *
526 * PARAMETERS: Event - The fixed event 302 * PARAMETERS: Event - The fixed event
@@ -575,379 +351,3 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
575} 351}
576 352
577ACPI_EXPORT_SYMBOL(acpi_get_event_status) 353ACPI_EXPORT_SYMBOL(acpi_get_event_status)
578
579/*******************************************************************************
580 *
581 * FUNCTION: acpi_get_gpe_status
582 *
583 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
584 * gpe_number - GPE level within the GPE block
585 * event_status - Where the current status of the event will
586 * be returned
587 *
588 * RETURN: Status
589 *
590 * DESCRIPTION: Get status of an event (general purpose)
591 *
592 ******************************************************************************/
593acpi_status
594acpi_get_gpe_status(acpi_handle gpe_device,
595 u32 gpe_number, acpi_event_status *event_status)
596{
597 acpi_status status = AE_OK;
598 struct acpi_gpe_event_info *gpe_event_info;
599 acpi_cpu_flags flags;
600
601 ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
602
603 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
604
605 /* Ensure that we have a valid GPE number */
606
607 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
608 if (!gpe_event_info) {
609 status = AE_BAD_PARAMETER;
610 goto unlock_and_exit;
611 }
612
613 /* Obtain status on the requested GPE number */
614
615 status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
616
617 if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
618 *event_status |= ACPI_EVENT_FLAG_HANDLE;
619
620 unlock_and_exit:
621 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
622 return_ACPI_STATUS(status);
623}
624
625ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
626/*******************************************************************************
627 *
628 * FUNCTION: acpi_install_gpe_block
629 *
630 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
631 * gpe_block_address - Address and space_iD
632 * register_count - Number of GPE register pairs in the block
633 * interrupt_number - H/W interrupt for the block
634 *
635 * RETURN: Status
636 *
637 * DESCRIPTION: Create and Install a block of GPE registers
638 *
639 ******************************************************************************/
640acpi_status
641acpi_install_gpe_block(acpi_handle gpe_device,
642 struct acpi_generic_address *gpe_block_address,
643 u32 register_count, u32 interrupt_number)
644{
645 acpi_status status = AE_OK;
646 union acpi_operand_object *obj_desc;
647 struct acpi_namespace_node *node;
648 struct acpi_gpe_block_info *gpe_block;
649
650 ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
651
652 if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
653 return_ACPI_STATUS(AE_BAD_PARAMETER);
654 }
655
656 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
657 if (ACPI_FAILURE(status)) {
658 return (status);
659 }
660
661 node = acpi_ns_validate_handle(gpe_device);
662 if (!node) {
663 status = AE_BAD_PARAMETER;
664 goto unlock_and_exit;
665 }
666
667 /*
668 * For user-installed GPE Block Devices, the gpe_block_base_number
669 * is always zero
670 */
671 status =
672 acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
673 interrupt_number, &gpe_block);
674 if (ACPI_FAILURE(status)) {
675 goto unlock_and_exit;
676 }
677
678 /* Install block in the device_object attached to the node */
679
680 obj_desc = acpi_ns_get_attached_object(node);
681 if (!obj_desc) {
682
683 /*
684 * No object, create a new one (Device nodes do not always have
685 * an attached object)
686 */
687 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
688 if (!obj_desc) {
689 status = AE_NO_MEMORY;
690 goto unlock_and_exit;
691 }
692
693 status =
694 acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
695
696 /* Remove local reference to the object */
697
698 acpi_ut_remove_reference(obj_desc);
699
700 if (ACPI_FAILURE(status)) {
701 goto unlock_and_exit;
702 }
703 }
704
705 /* Now install the GPE block in the device_object */
706
707 obj_desc->device.gpe_block = gpe_block;
708
709 unlock_and_exit:
710 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
711 return_ACPI_STATUS(status);
712}
713
714ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
715
716/*******************************************************************************
717 *
718 * FUNCTION: acpi_remove_gpe_block
719 *
720 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
721 *
722 * RETURN: Status
723 *
724 * DESCRIPTION: Remove a previously installed block of GPE registers
725 *
726 ******************************************************************************/
727acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
728{
729 union acpi_operand_object *obj_desc;
730 acpi_status status;
731 struct acpi_namespace_node *node;
732
733 ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
734
735 if (!gpe_device) {
736 return_ACPI_STATUS(AE_BAD_PARAMETER);
737 }
738
739 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
740 if (ACPI_FAILURE(status)) {
741 return (status);
742 }
743
744 node = acpi_ns_validate_handle(gpe_device);
745 if (!node) {
746 status = AE_BAD_PARAMETER;
747 goto unlock_and_exit;
748 }
749
750 /* Get the device_object attached to the node */
751
752 obj_desc = acpi_ns_get_attached_object(node);
753 if (!obj_desc || !obj_desc->device.gpe_block) {
754 return_ACPI_STATUS(AE_NULL_OBJECT);
755 }
756
757 /* Delete the GPE block (but not the device_object) */
758
759 status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
760 if (ACPI_SUCCESS(status)) {
761 obj_desc->device.gpe_block = NULL;
762 }
763
764 unlock_and_exit:
765 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
766 return_ACPI_STATUS(status);
767}
768
769ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
770
771/*******************************************************************************
772 *
773 * FUNCTION: acpi_get_gpe_device
774 *
775 * PARAMETERS: Index - System GPE index (0-current_gpe_count)
776 * gpe_device - Where the parent GPE Device is returned
777 *
778 * RETURN: Status
779 *
780 * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
781 * gpe device indicates that the gpe number is contained in one of
782 * the FADT-defined gpe blocks. Otherwise, the GPE block device.
783 *
784 ******************************************************************************/
785acpi_status
786acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
787{
788 struct acpi_gpe_device_info info;
789 acpi_status status;
790
791 ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
792
793 if (!gpe_device) {
794 return_ACPI_STATUS(AE_BAD_PARAMETER);
795 }
796
797 if (index >= acpi_current_gpe_count) {
798 return_ACPI_STATUS(AE_NOT_EXIST);
799 }
800
801 /* Setup and walk the GPE list */
802
803 info.index = index;
804 info.status = AE_NOT_EXIST;
805 info.gpe_device = NULL;
806 info.next_block_base_index = 0;
807
808 status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
809 if (ACPI_FAILURE(status)) {
810 return_ACPI_STATUS(status);
811 }
812
813 *gpe_device = info.gpe_device;
814 return_ACPI_STATUS(info.status);
815}
816
817ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
818
819/*******************************************************************************
820 *
821 * FUNCTION: acpi_ev_get_gpe_device
822 *
823 * PARAMETERS: GPE_WALK_CALLBACK
824 *
825 * RETURN: Status
826 *
827 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
828 * block device. NULL if the GPE is one of the FADT-defined GPEs.
829 *
830 ******************************************************************************/
831static acpi_status
832acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
833 struct acpi_gpe_block_info *gpe_block, void *context)
834{
835 struct acpi_gpe_device_info *info = context;
836
837 /* Increment Index by the number of GPEs in this block */
838
839 info->next_block_base_index += gpe_block->gpe_count;
840
841 if (info->index < info->next_block_base_index) {
842 /*
843 * The GPE index is within this block, get the node. Leave the node
844 * NULL for the FADT-defined GPEs
845 */
846 if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
847 info->gpe_device = gpe_block->node;
848 }
849
850 info->status = AE_OK;
851 return (AE_CTRL_END);
852 }
853
854 return (AE_OK);
855}
856
857/******************************************************************************
858 *
859 * FUNCTION: acpi_disable_all_gpes
860 *
861 * PARAMETERS: None
862 *
863 * RETURN: Status
864 *
865 * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
866 *
867 ******************************************************************************/
868
869acpi_status acpi_disable_all_gpes(void)
870{
871 acpi_status status;
872
873 ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
874
875 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
876 if (ACPI_FAILURE(status)) {
877 return_ACPI_STATUS(status);
878 }
879
880 status = acpi_hw_disable_all_gpes();
881 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
882
883 return_ACPI_STATUS(status);
884}
885
886/******************************************************************************
887 *
888 * FUNCTION: acpi_enable_all_runtime_gpes
889 *
890 * PARAMETERS: None
891 *
892 * RETURN: Status
893 *
894 * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
895 *
896 ******************************************************************************/
897
898acpi_status acpi_enable_all_runtime_gpes(void)
899{
900 acpi_status status;
901
902 ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
903
904 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
905 if (ACPI_FAILURE(status)) {
906 return_ACPI_STATUS(status);
907 }
908
909 status = acpi_hw_enable_all_runtime_gpes();
910 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
911
912 return_ACPI_STATUS(status);
913}
914
915/******************************************************************************
916 *
917 * FUNCTION: acpi_update_gpes
918 *
919 * PARAMETERS: None
920 *
921 * RETURN: None
922 *
923 * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
924 * are not pointed to by any device _PRW methods indicating that
925 * these GPEs are generally intended for system or device wakeup
926 * (such GPEs have to be enabled directly when the devices whose
927 * _PRW methods point to them are set up for wakeup signaling).
928 *
929 ******************************************************************************/
930
931acpi_status acpi_update_gpes(void)
932{
933 acpi_status status;
934
935 ACPI_FUNCTION_TRACE(acpi_update_gpes);
936
937 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
938 if (ACPI_FAILURE(status)) {
939 return_ACPI_STATUS(status);
940 } else if (acpi_all_gpes_initialized) {
941 goto unlock;
942 }
943
944 status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
945 if (ACPI_SUCCESS(status)) {
946 acpi_all_gpes_initialized = TRUE;
947 }
948
949unlock:
950 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
951
952 return_ACPI_STATUS(status);
953}
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
new file mode 100644
index 000000000000..e9562a7cb2f9
--- /dev/null
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -0,0 +1,669 @@
1/******************************************************************************
2 *
3 * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48
49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evxfgpe")
51
52/******************************************************************************
53 *
54 * FUNCTION: acpi_update_all_gpes
55 *
56 * PARAMETERS: None
57 *
58 * RETURN: Status
59 *
60 * DESCRIPTION: Complete GPE initialization and enable all GPEs that have
61 * associated _Lxx or _Exx methods and are not pointed to by any
62 * device _PRW methods (this indicates that these GPEs are
63 * generally intended for system or device wakeup. Such GPEs
64 * have to be enabled directly when the devices whose _PRW
65 * methods point to them are set up for wakeup signaling.)
66 *
67 * NOTE: Should be called after any GPEs are added to the system. Primarily,
68 * after the system _PRW methods have been run, but also after a GPE Block
69 * Device has been added or if any new GPE methods have been added via a
70 * dynamic table load.
71 *
72 ******************************************************************************/
73
74acpi_status acpi_update_all_gpes(void)
75{
76 acpi_status status;
77
78 ACPI_FUNCTION_TRACE(acpi_update_all_gpes);
79
80 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
81 if (ACPI_FAILURE(status)) {
82 return_ACPI_STATUS(status);
83 }
84
85 if (acpi_gbl_all_gpes_initialized) {
86 goto unlock_and_exit;
87 }
88
89 status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
90 if (ACPI_SUCCESS(status)) {
91 acpi_gbl_all_gpes_initialized = TRUE;
92 }
93
94unlock_and_exit:
95 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
96
97 return_ACPI_STATUS(status);
98}
99
100ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
101
102/*******************************************************************************
103 *
104 * FUNCTION: acpi_enable_gpe
105 *
106 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
107 * gpe_number - GPE level within the GPE block
108 *
109 * RETURN: Status
110 *
111 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
112 * hardware-enabled.
113 *
114 ******************************************************************************/
115
116acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
117{
118 acpi_status status = AE_BAD_PARAMETER;
119 struct acpi_gpe_event_info *gpe_event_info;
120 acpi_cpu_flags flags;
121
122 ACPI_FUNCTION_TRACE(acpi_enable_gpe);
123
124 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
125
126 /* Ensure that we have a valid GPE number */
127
128 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
129 if (gpe_event_info) {
130 status = acpi_ev_add_gpe_reference(gpe_event_info);
131 }
132
133 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
134 return_ACPI_STATUS(status);
135}
136ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
137
138/*******************************************************************************
139 *
140 * FUNCTION: acpi_disable_gpe
141 *
142 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
143 * gpe_number - GPE level within the GPE block
144 *
145 * RETURN: Status
146 *
147 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
148 * removed, only then is the GPE disabled (for runtime GPEs), or
149 * the GPE mask bit disabled (for wake GPEs)
150 *
151 ******************************************************************************/
152
153acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
154{
155 acpi_status status = AE_BAD_PARAMETER;
156 struct acpi_gpe_event_info *gpe_event_info;
157 acpi_cpu_flags flags;
158
159 ACPI_FUNCTION_TRACE(acpi_disable_gpe);
160
161 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
162
163 /* Ensure that we have a valid GPE number */
164
165 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
166 if (gpe_event_info) {
167 status = acpi_ev_remove_gpe_reference(gpe_event_info) ;
168 }
169
170 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
171 return_ACPI_STATUS(status);
172}
173ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
174
175
176/*******************************************************************************
177 *
178 * FUNCTION: acpi_setup_gpe_for_wake
179 *
180 * PARAMETERS: wake_device - Device associated with the GPE (via _PRW)
181 * gpe_device - Parent GPE Device. NULL for GPE0/GPE1
182 * gpe_number - GPE level within the GPE block
183 *
184 * RETURN: Status
185 *
186 * DESCRIPTION: Mark a GPE as having the ability to wake the system. This
187 * interface is intended to be used as the host executes the
188 * _PRW methods (Power Resources for Wake) in the system tables.
189 * Each _PRW appears under a Device Object (The wake_device), and
190 * contains the info for the wake GPE associated with the
191 * wake_device.
192 *
193 ******************************************************************************/
194acpi_status
195acpi_setup_gpe_for_wake(acpi_handle wake_device,
196 acpi_handle gpe_device, u32 gpe_number)
197{
198 acpi_status status = AE_BAD_PARAMETER;
199 struct acpi_gpe_event_info *gpe_event_info;
200 struct acpi_namespace_node *device_node;
201 acpi_cpu_flags flags;
202
203 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
204
205 /* Parameter Validation */
206
207 if (!wake_device) {
208 /*
209 * By forcing wake_device to be valid, we automatically enable the
210 * implicit notify feature on all hosts.
211 */
212 return_ACPI_STATUS(AE_BAD_PARAMETER);
213 }
214
215 /* Validate wake_device is of type Device */
216
217 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
218 if (device_node->type != ACPI_TYPE_DEVICE) {
219 return_ACPI_STATUS(AE_BAD_PARAMETER);
220 }
221
222 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
223
224 /* Ensure that we have a valid GPE number */
225
226 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
227 if (gpe_event_info) {
228 /*
229 * If there is no method or handler for this GPE, then the
230 * wake_device will be notified whenever this GPE fires (aka
231 * "implicit notify") Note: The GPE is assumed to be
232 * level-triggered (for windows compatibility).
233 */
234 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
235 ACPI_GPE_DISPATCH_NONE) {
236 gpe_event_info->flags =
237 (ACPI_GPE_DISPATCH_NOTIFY |
238 ACPI_GPE_LEVEL_TRIGGERED);
239 gpe_event_info->dispatch.device_node = device_node;
240 }
241
242 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
243 status = AE_OK;
244 }
245
246 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
247 return_ACPI_STATUS(status);
248}
249ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
250
251/*******************************************************************************
252 *
253 * FUNCTION: acpi_set_gpe_wake_mask
254 *
255 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
256 * gpe_number - GPE level within the GPE block
257 * Action - Enable or Disable
258 *
259 * RETURN: Status
260 *
261 * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must
262 * already be marked as a WAKE GPE.
263 *
264 ******************************************************************************/
265
266acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
267{
268 acpi_status status = AE_OK;
269 struct acpi_gpe_event_info *gpe_event_info;
270 struct acpi_gpe_register_info *gpe_register_info;
271 acpi_cpu_flags flags;
272 u32 register_bit;
273
274 ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
275
276 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
277
278 /*
279 * Ensure that we have a valid GPE number and that this GPE is in
280 * fact a wake GPE
281 */
282 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
283 if (!gpe_event_info) {
284 status = AE_BAD_PARAMETER;
285 goto unlock_and_exit;
286 }
287
288 if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
289 status = AE_TYPE;
290 goto unlock_and_exit;
291 }
292
293 gpe_register_info = gpe_event_info->register_info;
294 if (!gpe_register_info) {
295 status = AE_NOT_EXIST;
296 goto unlock_and_exit;
297 }
298
299 register_bit =
300 acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
301
302 /* Perform the action */
303
304 switch (action) {
305 case ACPI_GPE_ENABLE:
306 ACPI_SET_BIT(gpe_register_info->enable_for_wake,
307 (u8)register_bit);
308 break;
309
310 case ACPI_GPE_DISABLE:
311 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
312 (u8)register_bit);
313 break;
314
315 default:
316 ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
317 status = AE_BAD_PARAMETER;
318 break;
319 }
320
321unlock_and_exit:
322 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
323 return_ACPI_STATUS(status);
324}
325
326ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask)
327
328/*******************************************************************************
329 *
330 * FUNCTION: acpi_clear_gpe
331 *
332 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
333 * gpe_number - GPE level within the GPE block
334 *
335 * RETURN: Status
336 *
337 * DESCRIPTION: Clear an ACPI event (general purpose)
338 *
339 ******************************************************************************/
340acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
341{
342 acpi_status status = AE_OK;
343 struct acpi_gpe_event_info *gpe_event_info;
344 acpi_cpu_flags flags;
345
346 ACPI_FUNCTION_TRACE(acpi_clear_gpe);
347
348 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
349
350 /* Ensure that we have a valid GPE number */
351
352 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
353 if (!gpe_event_info) {
354 status = AE_BAD_PARAMETER;
355 goto unlock_and_exit;
356 }
357
358 status = acpi_hw_clear_gpe(gpe_event_info);
359
360 unlock_and_exit:
361 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
362 return_ACPI_STATUS(status);
363}
364
365ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
366
367/*******************************************************************************
368 *
369 * FUNCTION: acpi_get_gpe_status
370 *
371 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
372 * gpe_number - GPE level within the GPE block
373 * event_status - Where the current status of the event will
374 * be returned
375 *
376 * RETURN: Status
377 *
378 * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled)
379 *
380 ******************************************************************************/
381acpi_status
382acpi_get_gpe_status(acpi_handle gpe_device,
383 u32 gpe_number, acpi_event_status *event_status)
384{
385 acpi_status status = AE_OK;
386 struct acpi_gpe_event_info *gpe_event_info;
387 acpi_cpu_flags flags;
388
389 ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
390
391 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
392
393 /* Ensure that we have a valid GPE number */
394
395 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
396 if (!gpe_event_info) {
397 status = AE_BAD_PARAMETER;
398 goto unlock_and_exit;
399 }
400
401 /* Obtain status on the requested GPE number */
402
403 status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
404
405 if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
406 *event_status |= ACPI_EVENT_FLAG_HANDLE;
407
408 unlock_and_exit:
409 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
410 return_ACPI_STATUS(status);
411}
412
413ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
414
415/******************************************************************************
416 *
417 * FUNCTION: acpi_disable_all_gpes
418 *
419 * PARAMETERS: None
420 *
421 * RETURN: Status
422 *
423 * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
424 *
425 ******************************************************************************/
426
427acpi_status acpi_disable_all_gpes(void)
428{
429 acpi_status status;
430
431 ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
432
433 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
434 if (ACPI_FAILURE(status)) {
435 return_ACPI_STATUS(status);
436 }
437
438 status = acpi_hw_disable_all_gpes();
439 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
440
441 return_ACPI_STATUS(status);
442}
443
444ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes)
445
446/******************************************************************************
447 *
448 * FUNCTION: acpi_enable_all_runtime_gpes
449 *
450 * PARAMETERS: None
451 *
452 * RETURN: Status
453 *
454 * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
455 *
456 ******************************************************************************/
457
458acpi_status acpi_enable_all_runtime_gpes(void)
459{
460 acpi_status status;
461
462 ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
463
464 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
465 if (ACPI_FAILURE(status)) {
466 return_ACPI_STATUS(status);
467 }
468
469 status = acpi_hw_enable_all_runtime_gpes();
470 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
471
472 return_ACPI_STATUS(status);
473}
474
475ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
476
477/*******************************************************************************
478 *
479 * FUNCTION: acpi_install_gpe_block
480 *
481 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
482 * gpe_block_address - Address and space_iD
483 * register_count - Number of GPE register pairs in the block
484 * interrupt_number - H/W interrupt for the block
485 *
486 * RETURN: Status
487 *
488 * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not
489 * enabled here.
490 *
491 ******************************************************************************/
492acpi_status
493acpi_install_gpe_block(acpi_handle gpe_device,
494 struct acpi_generic_address *gpe_block_address,
495 u32 register_count, u32 interrupt_number)
496{
497 acpi_status status;
498 union acpi_operand_object *obj_desc;
499 struct acpi_namespace_node *node;
500 struct acpi_gpe_block_info *gpe_block;
501
502 ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
503
504 if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
505 return_ACPI_STATUS(AE_BAD_PARAMETER);
506 }
507
508 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
509 if (ACPI_FAILURE(status)) {
510 return (status);
511 }
512
513 node = acpi_ns_validate_handle(gpe_device);
514 if (!node) {
515 status = AE_BAD_PARAMETER;
516 goto unlock_and_exit;
517 }
518
519 /*
520 * For user-installed GPE Block Devices, the gpe_block_base_number
521 * is always zero
522 */
523 status =
524 acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
525 interrupt_number, &gpe_block);
526 if (ACPI_FAILURE(status)) {
527 goto unlock_and_exit;
528 }
529
530 /* Install block in the device_object attached to the node */
531
532 obj_desc = acpi_ns_get_attached_object(node);
533 if (!obj_desc) {
534
535 /*
536 * No object, create a new one (Device nodes do not always have
537 * an attached object)
538 */
539 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
540 if (!obj_desc) {
541 status = AE_NO_MEMORY;
542 goto unlock_and_exit;
543 }
544
545 status =
546 acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
547
548 /* Remove local reference to the object */
549
550 acpi_ut_remove_reference(obj_desc);
551
552 if (ACPI_FAILURE(status)) {
553 goto unlock_and_exit;
554 }
555 }
556
557 /* Now install the GPE block in the device_object */
558
559 obj_desc->device.gpe_block = gpe_block;
560
561 unlock_and_exit:
562 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
563 return_ACPI_STATUS(status);
564}
565
566ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
567
568/*******************************************************************************
569 *
570 * FUNCTION: acpi_remove_gpe_block
571 *
572 * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
573 *
574 * RETURN: Status
575 *
576 * DESCRIPTION: Remove a previously installed block of GPE registers
577 *
578 ******************************************************************************/
579acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
580{
581 union acpi_operand_object *obj_desc;
582 acpi_status status;
583 struct acpi_namespace_node *node;
584
585 ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
586
587 if (!gpe_device) {
588 return_ACPI_STATUS(AE_BAD_PARAMETER);
589 }
590
591 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
592 if (ACPI_FAILURE(status)) {
593 return (status);
594 }
595
596 node = acpi_ns_validate_handle(gpe_device);
597 if (!node) {
598 status = AE_BAD_PARAMETER;
599 goto unlock_and_exit;
600 }
601
602 /* Get the device_object attached to the node */
603
604 obj_desc = acpi_ns_get_attached_object(node);
605 if (!obj_desc || !obj_desc->device.gpe_block) {
606 return_ACPI_STATUS(AE_NULL_OBJECT);
607 }
608
609 /* Delete the GPE block (but not the device_object) */
610
611 status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
612 if (ACPI_SUCCESS(status)) {
613 obj_desc->device.gpe_block = NULL;
614 }
615
616 unlock_and_exit:
617 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
618 return_ACPI_STATUS(status);
619}
620
621ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
622
623/*******************************************************************************
624 *
625 * FUNCTION: acpi_get_gpe_device
626 *
627 * PARAMETERS: Index - System GPE index (0-current_gpe_count)
628 * gpe_device - Where the parent GPE Device is returned
629 *
630 * RETURN: Status
631 *
632 * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
633 * gpe device indicates that the gpe number is contained in one of
634 * the FADT-defined gpe blocks. Otherwise, the GPE block device.
635 *
636 ******************************************************************************/
637acpi_status
638acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
639{
640 struct acpi_gpe_device_info info;
641 acpi_status status;
642
643 ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
644
645 if (!gpe_device) {
646 return_ACPI_STATUS(AE_BAD_PARAMETER);
647 }
648
649 if (index >= acpi_current_gpe_count) {
650 return_ACPI_STATUS(AE_NOT_EXIST);
651 }
652
653 /* Setup and walk the GPE list */
654
655 info.index = index;
656 info.status = AE_NOT_EXIST;
657 info.gpe_device = NULL;
658 info.next_block_base_index = 0;
659
660 status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
661 if (ACPI_FAILURE(status)) {
662 return_ACPI_STATUS(status);
663 }
664
665 *gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device);
666 return_ACPI_STATUS(info.status);
667}
668
669ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index ce9314f79451..eb7386763712 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 18832205b631..745a42b401f5 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index b73bc50c5b76..74162a11817d 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3c61b48c73f5..e7b372d17667 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -482,13 +482,11 @@ acpi_ex_create_method(u8 * aml_start,
482 obj_desc->method.aml_length = aml_length; 482 obj_desc->method.aml_length = aml_length;
483 483
484 /* 484 /*
485 * Disassemble the method flags. Split off the Arg Count 485 * Disassemble the method flags. Split off the arg_count, Serialized
486 * for efficiency 486 * flag, and sync_level for efficiency.
487 */ 487 */
488 method_flags = (u8) operand[1]->integer.value; 488 method_flags = (u8) operand[1]->integer.value;
489 489
490 obj_desc->method.method_flags =
491 (u8) (method_flags & ~AML_METHOD_ARG_COUNT);
492 obj_desc->method.param_count = 490 obj_desc->method.param_count =
493 (u8) (method_flags & AML_METHOD_ARG_COUNT); 491 (u8) (method_flags & AML_METHOD_ARG_COUNT);
494 492
@@ -497,6 +495,8 @@ acpi_ex_create_method(u8 * aml_start,
497 * created for this method when it is parsed. 495 * created for this method when it is parsed.
498 */ 496 */
499 if (method_flags & AML_METHOD_SERIALIZED) { 497 if (method_flags & AML_METHOD_SERIALIZED) {
498 obj_desc->method.info_flags = ACPI_METHOD_SERIALIZED;
499
500 /* 500 /*
501 * ACPI 1.0: sync_level = 0 501 * ACPI 1.0: sync_level = 0
502 * ACPI 2.0: sync_level = sync_level in method declaration 502 * ACPI 2.0: sync_level = sync_level in method declaration
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index be8c98b480d7..c7a2f1edd282 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index f067bbb0d961..61b8c0e8b74d 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -122,7 +122,7 @@ static struct acpi_exdump_info acpi_ex_dump_event[2] = {
122 122
123static struct acpi_exdump_info acpi_ex_dump_method[9] = { 123static struct acpi_exdump_info acpi_ex_dump_method[9] = {
124 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL}, 124 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
125 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.method_flags), "Method Flags"}, 125 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.info_flags), "Info Flags"},
126 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), 126 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count),
127 "Parameter Count"}, 127 "Parameter Count"},
128 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"}, 128 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index f17d2ff0031b..0bde2230c028 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 38293fd3e088..6c79c29f082d 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 95db4be0877b..703d88ed0b3d 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 6af14e43f839..be1c56ead653 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index d11e539ef763..49ec049c157e 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 84e4d185aa25..236ead14b7f7 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 10e104cf0fb9..2571b4a310f4 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 7a08d23befcd..1b48d9d28c9a 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 4b50730cf9a0..f4a2787e8e92 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 7aae29f73d3f..cc95e2000406 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index de17e10da0ed..f0d5e14f1f2c 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 1fa4289a687e..55997e46948b 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 7ca35ea8acea..db502cd7d934 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 8c97cfd6a0fd..e3bb00ccdff5 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 1624436ba4c5..c0c8842dd344 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index d4af684620ca..a979017d56b8 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2010, Intel Corp. 10 * Copyright (C) 2000 - 2011, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index e972b667b09b..dc665cc554de 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 675aaa91a770..df66e7b686be 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 4093522eed45..8ad93146dd32 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index b44274a0b62c..fc380d3d45ab 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 14750db2a1b8..f610d88a66be 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -62,10 +62,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
62 * PARAMETERS: gpe_event_info - Info block for the GPE 62 * PARAMETERS: gpe_event_info - Info block for the GPE
63 * gpe_register_info - Info block for the GPE register 63 * gpe_register_info - Info block for the GPE register
64 * 64 *
65 * RETURN: Status 65 * RETURN: Register mask with a one in the GPE bit position
66 * 66 *
67 * DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given 67 * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the
68 * GPE set. 68 * correct position for the input GPE.
69 * 69 *
70 ******************************************************************************/ 70 ******************************************************************************/
71 71
@@ -85,12 +85,12 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
85 * 85 *
86 * RETURN: Status 86 * RETURN: Status
87 * 87 *
88 * DESCRIPTION: Enable or disable a single GPE in its enable register. 88 * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
89 * 89 *
90 ******************************************************************************/ 90 ******************************************************************************/
91 91
92acpi_status 92acpi_status
93acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) 93acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
94{ 94{
95 struct acpi_gpe_register_info *gpe_register_info; 95 struct acpi_gpe_register_info *gpe_register_info;
96 acpi_status status; 96 acpi_status status;
@@ -113,14 +113,20 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
113 return (status); 113 return (status);
114 } 114 }
115 115
116 /* Set ot clear just the bit that corresponds to this GPE */ 116 /* Set or clear just the bit that corresponds to this GPE */
117 117
118 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, 118 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
119 gpe_register_info); 119 gpe_register_info);
120 switch (action) { 120 switch (action) {
121 case ACPI_GPE_COND_ENABLE: 121 case ACPI_GPE_CONDITIONAL_ENABLE:
122 if (!(register_bit & gpe_register_info->enable_for_run)) 122
123 /* Only enable if the enable_for_run bit is set */
124
125 if (!(register_bit & gpe_register_info->enable_for_run)) {
123 return (AE_BAD_PARAMETER); 126 return (AE_BAD_PARAMETER);
127 }
128
129 /*lint -fallthrough */
124 130
125 case ACPI_GPE_ENABLE: 131 case ACPI_GPE_ENABLE:
126 ACPI_SET_BIT(enable_mask, register_bit); 132 ACPI_SET_BIT(enable_mask, register_bit);
@@ -131,7 +137,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
131 break; 137 break;
132 138
133 default: 139 default:
134 ACPI_ERROR((AE_INFO, "Invalid action\n")); 140 ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action));
135 return (AE_BAD_PARAMETER); 141 return (AE_BAD_PARAMETER);
136 } 142 }
137 143
@@ -168,13 +174,13 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
168 return (AE_NOT_EXIST); 174 return (AE_NOT_EXIST);
169 } 175 }
170 176
171 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
172 gpe_register_info);
173
174 /* 177 /*
175 * Write a one to the appropriate bit in the status register to 178 * Write a one to the appropriate bit in the status register to
176 * clear this GPE. 179 * clear this GPE.
177 */ 180 */
181 register_bit =
182 acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
183
178 status = acpi_hw_write(register_bit, 184 status = acpi_hw_write(register_bit,
179 &gpe_register_info->status_address); 185 &gpe_register_info->status_address);
180 186
@@ -201,8 +207,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
201 u32 in_byte; 207 u32 in_byte;
202 u32 register_bit; 208 u32 register_bit;
203 struct acpi_gpe_register_info *gpe_register_info; 209 struct acpi_gpe_register_info *gpe_register_info;
204 acpi_status status;
205 acpi_event_status local_event_status = 0; 210 acpi_event_status local_event_status = 0;
211 acpi_status status;
206 212
207 ACPI_FUNCTION_ENTRY(); 213 ACPI_FUNCTION_ENTRY();
208 214
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index ad21c7d8bf4f..050fd227951b 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 5d1273b660ae..55accb7018bb 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
7 ******************************************************************************/ 7 ******************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2010, Intel Corp. 10 * Copyright (C) 2000 - 2011, Intel Corp.
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3796811276ac..2ac28bbe8827 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 1ef8e0bb250b..9c8eb71a12fb 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e1d9c777b213..5f1605874655 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 50cc3be77724..6f98d210e71c 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 0cd925be5fc1..d93172fd15a8 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -163,9 +163,9 @@ acpi_status acpi_ns_root_initialize(void)
163#else 163#else
164 /* Mark this as a very SPECIAL method */ 164 /* Mark this as a very SPECIAL method */
165 165
166 obj_desc->method.method_flags = 166 obj_desc->method.info_flags =
167 AML_METHOD_INTERNAL_ONLY; 167 ACPI_METHOD_INTERNAL_ONLY;
168 obj_desc->method.extra.implementation = 168 obj_desc->method.dispatch.implementation =
169 acpi_ut_osi_implementation; 169 acpi_ut_osi_implementation;
170#endif 170#endif
171 break; 171 break;
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1e5ff803d9ad..1d0ef15d158f 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -234,8 +234,8 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
234 * modified the namespace. This is used for cleanup when the 234 * modified the namespace. This is used for cleanup when the
235 * method exits. 235 * method exits.
236 */ 236 */
237 walk_state->method_desc->method.flags |= 237 walk_state->method_desc->method.info_flags |=
238 AOPOBJ_MODIFIED_NAMESPACE; 238 ACPI_METHOD_MODIFIED_NAMESPACE;
239 } 239 }
240 } 240 }
241 241
@@ -341,6 +341,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
341{ 341{
342 struct acpi_namespace_node *child_node = NULL; 342 struct acpi_namespace_node *child_node = NULL;
343 u32 level = 1; 343 u32 level = 1;
344 acpi_status status;
344 345
345 ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree); 346 ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
346 347
@@ -348,6 +349,13 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
348 return_VOID; 349 return_VOID;
349 } 350 }
350 351
352 /* Lock namespace for possible update */
353
354 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
355 if (ACPI_FAILURE(status)) {
356 return_VOID;
357 }
358
351 /* 359 /*
352 * Traverse the tree of objects until we bubble back up 360 * Traverse the tree of objects until we bubble back up
353 * to where we started. 361 * to where we started.
@@ -397,6 +405,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
397 } 405 }
398 } 406 }
399 407
408 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
400 return_VOID; 409 return_VOID;
401} 410}
402 411
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index a54dc39e304b..b683cc2ff9d3 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -624,9 +624,22 @@ acpi_ns_dump_objects(acpi_object_type type,
624 acpi_owner_id owner_id, acpi_handle start_handle) 624 acpi_owner_id owner_id, acpi_handle start_handle)
625{ 625{
626 struct acpi_walk_info info; 626 struct acpi_walk_info info;
627 acpi_status status;
627 628
628 ACPI_FUNCTION_ENTRY(); 629 ACPI_FUNCTION_ENTRY();
629 630
631 /*
632 * Just lock the entire namespace for the duration of the dump.
633 * We don't want any changes to the namespace during this time,
634 * especially the temporary nodes since we are going to display
635 * them also.
636 */
637 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
638 if (ACPI_FAILURE(status)) {
639 acpi_os_printf("Could not acquire namespace mutex\n");
640 return;
641 }
642
630 info.debug_level = ACPI_LV_TABLES; 643 info.debug_level = ACPI_LV_TABLES;
631 info.owner_id = owner_id; 644 info.owner_id = owner_id;
632 info.display_type = display_type; 645 info.display_type = display_type;
@@ -636,6 +649,8 @@ acpi_ns_dump_objects(acpi_object_type type,
636 ACPI_NS_WALK_TEMP_NODES, 649 ACPI_NS_WALK_TEMP_NODES,
637 acpi_ns_dump_one_object, NULL, 650 acpi_ns_dump_one_object, NULL,
638 (void *)&info, NULL); 651 (void *)&info, NULL);
652
653 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
639} 654}
640#endif /* ACPI_FUTURE_USAGE */ 655#endif /* ACPI_FUTURE_USAGE */
641 656
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index d2a97921e249..2ed294b7a4db 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f52829cc294b..c1bd02b1a058 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -389,7 +389,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
389 * acpi_gbl_root_node->Object is NULL at PASS1. 389 * acpi_gbl_root_node->Object is NULL at PASS1.
390 */ 390 */
391 if ((type == ACPI_TYPE_DEVICE) && parent_node->object) { 391 if ((type == ACPI_TYPE_DEVICE) && parent_node->object) {
392 method_obj->method.extra.handler = 392 method_obj->method.dispatch.handler =
393 parent_node->object->device.handler; 393 parent_node->object->device.handler;
394 } 394 }
395 395
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 0cac7ec0d2ec..fd7c6380e294 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index df18be94fefe..5f7dc691c183 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d3104af57e13..d5fa520c3de5 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 41a9213dd5af..3bb8bf105ea2 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 5808c89e9fac..b3234fa795b8 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 7096bcda0c72..9fb03fa8ffde 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index d1c136692667..1d76ac85b5e7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 4ef9f43ea926..973883babee1 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 41102a84272f..28b0d7a62b99 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a7d6ad9c111b..cb1b104a69a2 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 2cd5be8fe10f..345f0c3c6ad2 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index ebef8a7fd707..c53f0040e490 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index b01e45a415e3..3fd4526f3dba 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -603,10 +603,9 @@ acpi_status acpi_install_method(u8 *buffer)
603 method_obj->method.param_count = (u8) 603 method_obj->method.param_count = (u8)
604 (method_flags & AML_METHOD_ARG_COUNT); 604 (method_flags & AML_METHOD_ARG_COUNT);
605 605
606 method_obj->method.method_flags = (u8)
607 (method_flags & ~AML_METHOD_ARG_COUNT);
608
609 if (method_flags & AML_METHOD_SERIALIZED) { 606 if (method_flags & AML_METHOD_SERIALIZED) {
607 method_obj->method.info_flags = ACPI_METHOD_SERIALIZED;
608
610 method_obj->method.sync_level = (u8) 609 method_obj->method.sync_level = (u8)
611 ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4); 610 ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
612 } 611 }
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index a1f04e9b8030..db7660f8b869 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 7df1a4c95274..e1fad0ee0136 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 2f2e7760938c..01dd70d1de51 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -655,7 +655,7 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
655 method_obj->method.aml_start = aml_start; 655 method_obj->method.aml_start = aml_start;
656 method_obj->method.aml_length = aml_length; 656 method_obj->method.aml_length = aml_length;
657 method_obj->method.owner_id = owner_id; 657 method_obj->method.owner_id = owner_id;
658 method_obj->method.flags |= AOPOBJ_MODULE_LEVEL; 658 method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL;
659 659
660 /* 660 /*
661 * Save the parent node in next_object. This is cheating, but we 661 * Save the parent node in next_object. This is cheating, but we
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 2b0c3be2b1b8..bed08de7528c 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8d81542194d4..9bb0cbd37b5e 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,6 @@
55#include "acparser.h" 55#include "acparser.h"
56#include "acdispat.h" 56#include "acdispat.h"
57#include "amlcode.h" 57#include "amlcode.h"
58#include "acnamesp.h"
59#include "acinterp.h" 58#include "acinterp.h"
60 59
61#define _COMPONENT ACPI_PARSER 60#define _COMPONENT ACPI_PARSER
@@ -539,24 +538,16 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
539 /* Check for possible multi-thread reentrancy problem */ 538 /* Check for possible multi-thread reentrancy problem */
540 539
541 if ((status == AE_ALREADY_EXISTS) && 540 if ((status == AE_ALREADY_EXISTS) &&
542 (!walk_state->method_desc->method.mutex)) { 541 (!(walk_state->method_desc->method.
543 ACPI_INFO((AE_INFO, 542 info_flags & ACPI_METHOD_SERIALIZED))) {
544 "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
545 walk_state->method_node->name.
546 ascii));
547
548 /* 543 /*
549 * Method tried to create an object twice. The probable cause is 544 * Method is not serialized and tried to create an object
550 * that the method cannot handle reentrancy. 545 * twice. The probable cause is that the method cannot
551 * 546 * handle reentrancy. Mark as "pending serialized" now, and
552 * The method is marked not_serialized, but it tried to create 547 * then mark "serialized" when the last thread exits.
553 * a named object, causing the second thread entrance to fail.
554 * Workaround this problem by marking the method permanently
555 * as Serialized.
556 */ 548 */
557 walk_state->method_desc->method.method_flags |= 549 walk_state->method_desc->method.info_flags |=
558 AML_METHOD_SERIALIZED; 550 ACPI_METHOD_SERIALIZED_PENDING;
559 walk_state->method_desc->method.sync_level = 0;
560 } 551 }
561 } 552 }
562 553
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 40e2b279ea12..a5faa1323a02 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index d4b970c3630b..f1464c03aa42 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index fe29eee5adb1..7eda78503422 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 8abb9629443d..3312d6368bf1 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index c42f067cff9d..8086805d4494 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,6 @@
47#include "acdispat.h" 47#include "acdispat.h"
48#include "acinterp.h" 48#include "acinterp.h"
49#include "actables.h" 49#include "actables.h"
50#include "amlcode.h"
51 50
52#define _COMPONENT ACPI_PARSER 51#define _COMPONENT ACPI_PARSER
53ACPI_MODULE_NAME("psxface") 52ACPI_MODULE_NAME("psxface")
@@ -285,15 +284,15 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
285 goto cleanup; 284 goto cleanup;
286 } 285 }
287 286
288 if (info->obj_desc->method.flags & AOPOBJ_MODULE_LEVEL) { 287 if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
289 walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL; 288 walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
290 } 289 }
291 290
292 /* Invoke an internal method if necessary */ 291 /* Invoke an internal method if necessary */
293 292
294 if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) { 293 if (info->obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
295 status = 294 status =
296 info->obj_desc->method.extra.implementation(walk_state); 295 info->obj_desc->method.dispatch.implementation(walk_state);
297 info->return_object = walk_state->return_desc; 296 info->return_object = walk_state->return_desc;
298 297
299 /* Cleanup states */ 298 /* Cleanup states */
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 226c806ae986..9e66f9078426 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index d6ebf7ec622d..3a8a89ec2ca4 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index c80a2eea3a01..4ce6e1147e80 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index f859b0386fe4..33db7520c74b 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 1fd868b964fd..f9ea60872aa4 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 33bff17c0bbc..0c7efef008be 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 545da40d7fa7..50b8ad211167 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 7335f22aac20..1bfcef736c50 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 887b8ba8c432..7cc6d8625f1e 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index f8cd9e87d987..410264b22a29 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 491191e6cf69..231811e56939 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 9f6a6e7e1c8e..2ff657a28f26 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index d2ff4325c427..428d44e2d162 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 989d5c867864..a55cb2bb5abb 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 83d7af8d0905..48db0944ce4a 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 34f9c2bc5e1f..0f2d395feaba 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4a8b9e6ea57a..4b7085dfc683 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2010, Intel Corp. 9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index fd2c07d1d3ac..7eb6c6cc1edf 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 8f0896281567..0a697351cf69 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 6fef83f04bcd..aded299a2fa8 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index f21c486929a5..a9bcd816dc29 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index ed794cd033ea..31f5a7832ef1 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 22f59ef604e0..18f73c9d10bc 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index e87bc6760be6..97dd9bbf055a 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -768,7 +768,7 @@ acpi_status acpi_ut_init_globals(void)
768 acpi_gbl_gpe_fadt_blocks[0] = NULL; 768 acpi_gbl_gpe_fadt_blocks[0] = NULL;
769 acpi_gbl_gpe_fadt_blocks[1] = NULL; 769 acpi_gbl_gpe_fadt_blocks[1] = NULL;
770 acpi_current_gpe_count = 0; 770 acpi_current_gpe_count = 0;
771 acpi_all_gpes_initialized = FALSE; 771 acpi_gbl_all_gpes_initialized = FALSE;
772 772
773 /* Global handlers */ 773 /* Global handlers */
774 774
@@ -778,6 +778,7 @@ acpi_status acpi_ut_init_globals(void)
778 acpi_gbl_init_handler = NULL; 778 acpi_gbl_init_handler = NULL;
779 acpi_gbl_table_handler = NULL; 779 acpi_gbl_table_handler = NULL;
780 acpi_gbl_interface_handler = NULL; 780 acpi_gbl_interface_handler = NULL;
781 acpi_gbl_global_event_handler = NULL;
781 782
782 /* Global Lock support */ 783 /* Global Lock support */
783 784
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index d2906328535d..b679ea693545 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index c1b1c803ea9b..191b6828cce9 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index b081cd46a15f..f6bb75c6faf5 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 49cf7b7fd816..ce481da9bb45 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c7d0e05ef5a4..c33a852d4f42 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index d9efa495b433..a946c689f03b 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -85,6 +85,7 @@ acpi_status acpi_ut_mutex_initialize(void)
85 85
86 spin_lock_init(acpi_gbl_gpe_lock); 86 spin_lock_init(acpi_gbl_gpe_lock);
87 spin_lock_init(acpi_gbl_hardware_lock); 87 spin_lock_init(acpi_gbl_hardware_lock);
88 spin_lock_init(acpi_ev_global_lock_pending_lock);
88 89
89 /* Mutex for _OSI support */ 90 /* Mutex for _OSI support */
90 status = acpi_os_create_mutex(&acpi_gbl_osi_mutex); 91 status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index fd1fa2749ea5..188340a017b4 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 18c59a85fdca..1fb10cb8f11d 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 7965919000b1..84e051844247 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index d35d109b8da2..30c21e1a9360 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 1f484c9a6888..98ad125e14ff 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6f12e314fbae..916ae097c43c 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2010, Intel Corp. 8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 18df1e940276..ef0581f2094d 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -109,6 +109,8 @@ static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
109 return sizeof(*estatus) + estatus->data_length; 109 return sizeof(*estatus) + estatus->data_length;
110} 110}
111 111
112void apei_estatus_print(const char *pfx,
113 const struct acpi_hest_generic_status *estatus);
112int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); 114int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
113int apei_estatus_check(const struct acpi_hest_generic_status *estatus); 115int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
114#endif 116#endif
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index f4cf2fc4c8c1..31464a006d76 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -46,6 +46,317 @@ u64 cper_next_record_id(void)
46} 46}
47EXPORT_SYMBOL_GPL(cper_next_record_id); 47EXPORT_SYMBOL_GPL(cper_next_record_id);
48 48
49static const char *cper_severity_strs[] = {
50 "recoverable",
51 "fatal",
52 "corrected",
53 "info",
54};
55
56static const char *cper_severity_str(unsigned int severity)
57{
58 return severity < ARRAY_SIZE(cper_severity_strs) ?
59 cper_severity_strs[severity] : "unknown";
60}
61
62/*
63 * cper_print_bits - print strings for set bits
64 * @pfx: prefix for each line, including log level and prefix string
65 * @bits: bit mask
66 * @strs: string array, indexed by bit position
67 * @strs_size: size of the string array: @strs
68 *
69 * For each set bit in @bits, print the corresponding string in @strs.
70 * If the output length is longer than 80, multiple line will be
71 * printed, with @pfx is printed at the beginning of each line.
72 */
73static void cper_print_bits(const char *pfx, unsigned int bits,
74 const char *strs[], unsigned int strs_size)
75{
76 int i, len = 0;
77 const char *str;
78 char buf[84];
79
80 for (i = 0; i < strs_size; i++) {
81 if (!(bits & (1U << i)))
82 continue;
83 str = strs[i];
84 if (len && len + strlen(str) + 2 > 80) {
85 printk("%s\n", buf);
86 len = 0;
87 }
88 if (!len)
89 len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
90 else
91 len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
92 }
93 if (len)
94 printk("%s\n", buf);
95}
96
97static const char *cper_proc_type_strs[] = {
98 "IA32/X64",
99 "IA64",
100};
101
102static const char *cper_proc_isa_strs[] = {
103 "IA32",
104 "IA64",
105 "X64",
106};
107
108static const char *cper_proc_error_type_strs[] = {
109 "cache error",
110 "TLB error",
111 "bus error",
112 "micro-architectural error",
113};
114
115static const char *cper_proc_op_strs[] = {
116 "unknown or generic",
117 "data read",
118 "data write",
119 "instruction execution",
120};
121
122static const char *cper_proc_flag_strs[] = {
123 "restartable",
124 "precise IP",
125 "overflow",
126 "corrected",
127};
128
129static void cper_print_proc_generic(const char *pfx,
130 const struct cper_sec_proc_generic *proc)
131{
132 if (proc->validation_bits & CPER_PROC_VALID_TYPE)
133 printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
134 proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ?
135 cper_proc_type_strs[proc->proc_type] : "unknown");
136 if (proc->validation_bits & CPER_PROC_VALID_ISA)
137 printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
138 proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ?
139 cper_proc_isa_strs[proc->proc_isa] : "unknown");
140 if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
141 printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
142 cper_print_bits(pfx, proc->proc_error_type,
143 cper_proc_error_type_strs,
144 ARRAY_SIZE(cper_proc_error_type_strs));
145 }
146 if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
147 printk("%s""operation: %d, %s\n", pfx, proc->operation,
148 proc->operation < ARRAY_SIZE(cper_proc_op_strs) ?
149 cper_proc_op_strs[proc->operation] : "unknown");
150 if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
151 printk("%s""flags: 0x%02x\n", pfx, proc->flags);
152 cper_print_bits(pfx, proc->flags, cper_proc_flag_strs,
153 ARRAY_SIZE(cper_proc_flag_strs));
154 }
155 if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
156 printk("%s""level: %d\n", pfx, proc->level);
157 if (proc->validation_bits & CPER_PROC_VALID_VERSION)
158 printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
159 if (proc->validation_bits & CPER_PROC_VALID_ID)
160 printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
161 if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
162 printk("%s""target_address: 0x%016llx\n",
163 pfx, proc->target_addr);
164 if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
165 printk("%s""requestor_id: 0x%016llx\n",
166 pfx, proc->requestor_id);
167 if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
168 printk("%s""responder_id: 0x%016llx\n",
169 pfx, proc->responder_id);
170 if (proc->validation_bits & CPER_PROC_VALID_IP)
171 printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
172}
173
174static const char *cper_mem_err_type_strs[] = {
175 "unknown",
176 "no error",
177 "single-bit ECC",
178 "multi-bit ECC",
179 "single-symbol chipkill ECC",
180 "multi-symbol chipkill ECC",
181 "master abort",
182 "target abort",
183 "parity error",
184 "watchdog timeout",
185 "invalid address",
186 "mirror Broken",
187 "memory sparing",
188 "scrub corrected error",
189 "scrub uncorrected error",
190};
191
192static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
193{
194 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
195 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
196 if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)
197 printk("%s""physical_address: 0x%016llx\n",
198 pfx, mem->physical_addr);
199 if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK)
200 printk("%s""physical_address_mask: 0x%016llx\n",
201 pfx, mem->physical_addr_mask);
202 if (mem->validation_bits & CPER_MEM_VALID_NODE)
203 printk("%s""node: %d\n", pfx, mem->node);
204 if (mem->validation_bits & CPER_MEM_VALID_CARD)
205 printk("%s""card: %d\n", pfx, mem->card);
206 if (mem->validation_bits & CPER_MEM_VALID_MODULE)
207 printk("%s""module: %d\n", pfx, mem->module);
208 if (mem->validation_bits & CPER_MEM_VALID_BANK)
209 printk("%s""bank: %d\n", pfx, mem->bank);
210 if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
211 printk("%s""device: %d\n", pfx, mem->device);
212 if (mem->validation_bits & CPER_MEM_VALID_ROW)
213 printk("%s""row: %d\n", pfx, mem->row);
214 if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
215 printk("%s""column: %d\n", pfx, mem->column);
216 if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
217 printk("%s""bit_position: %d\n", pfx, mem->bit_pos);
218 if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
219 printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id);
220 if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
221 printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id);
222 if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
223 printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id);
224 if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
225 u8 etype = mem->error_type;
226 printk("%s""error_type: %d, %s\n", pfx, etype,
227 etype < ARRAY_SIZE(cper_mem_err_type_strs) ?
228 cper_mem_err_type_strs[etype] : "unknown");
229 }
230}
231
232static const char *cper_pcie_port_type_strs[] = {
233 "PCIe end point",
234 "legacy PCI end point",
235 "unknown",
236 "unknown",
237 "root port",
238 "upstream switch port",
239 "downstream switch port",
240 "PCIe to PCI/PCI-X bridge",
241 "PCI/PCI-X to PCIe bridge",
242 "root complex integrated endpoint device",
243 "root complex event collector",
244};
245
246static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
247{
248 if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
249 printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
250 pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
251 cper_pcie_port_type_strs[pcie->port_type] : "unknown");
252 if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
253 printk("%s""version: %d.%d\n", pfx,
254 pcie->version.major, pcie->version.minor);
255 if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
256 printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
257 pcie->command, pcie->status);
258 if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
259 const __u8 *p;
260 printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
261 pcie->device_id.segment, pcie->device_id.bus,
262 pcie->device_id.device, pcie->device_id.function);
263 printk("%s""slot: %d\n", pfx,
264 pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
265 printk("%s""secondary_bus: 0x%02x\n", pfx,
266 pcie->device_id.secondary_bus);
267 printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
268 pcie->device_id.vendor_id, pcie->device_id.device_id);
269 p = pcie->device_id.class_code;
270 printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
271 }
272 if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
273 printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
274 pcie->serial_number.lower, pcie->serial_number.upper);
275 if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
276 printk(
277 "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
278 pfx, pcie->bridge.secondary_status, pcie->bridge.control);
279}
280
281static const char *apei_estatus_section_flag_strs[] = {
282 "primary",
283 "containment warning",
284 "reset",
285 "threshold exceeded",
286 "resource not accessible",
287 "latent error",
288};
289
290static void apei_estatus_print_section(
291 const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
292{
293 uuid_le *sec_type = (uuid_le *)gdata->section_type;
294 __u16 severity;
295
296 severity = gdata->error_severity;
297 printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity,
298 cper_severity_str(severity));
299 printk("%s""flags: 0x%02x\n", pfx, gdata->flags);
300 cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs,
301 ARRAY_SIZE(apei_estatus_section_flag_strs));
302 if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
303 printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id);
304 if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
305 printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
306
307 if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
308 struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1);
309 printk("%s""section_type: general processor error\n", pfx);
310 if (gdata->error_data_length >= sizeof(*proc_err))
311 cper_print_proc_generic(pfx, proc_err);
312 else
313 goto err_section_too_small;
314 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
315 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
316 printk("%s""section_type: memory error\n", pfx);
317 if (gdata->error_data_length >= sizeof(*mem_err))
318 cper_print_mem(pfx, mem_err);
319 else
320 goto err_section_too_small;
321 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
322 struct cper_sec_pcie *pcie = (void *)(gdata + 1);
323 printk("%s""section_type: PCIe error\n", pfx);
324 if (gdata->error_data_length >= sizeof(*pcie))
325 cper_print_pcie(pfx, pcie);
326 else
327 goto err_section_too_small;
328 } else
329 printk("%s""section type: unknown, %pUl\n", pfx, sec_type);
330
331 return;
332
333err_section_too_small:
334 pr_err(FW_WARN "error section length is too small\n");
335}
336
337void apei_estatus_print(const char *pfx,
338 const struct acpi_hest_generic_status *estatus)
339{
340 struct acpi_hest_generic_data *gdata;
341 unsigned int data_len, gedata_len;
342 int sec_no = 0;
343 __u16 severity;
344
345 printk("%s""APEI generic hardware error status\n", pfx);
346 severity = estatus->error_severity;
347 printk("%s""severity: %d, %s\n", pfx, severity,
348 cper_severity_str(severity));
349 data_len = estatus->data_length;
350 gdata = (struct acpi_hest_generic_data *)(estatus + 1);
351 while (data_len > sizeof(*gdata)) {
352 gedata_len = gdata->error_data_length;
353 apei_estatus_print_section(pfx, gdata, sec_no);
354 data_len -= gedata_len + sizeof(*gdata);
355 sec_no++;
356 }
357}
358EXPORT_SYMBOL_GPL(apei_estatus_print);
359
49int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus) 360int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
50{ 361{
51 if (estatus->data_length && 362 if (estatus->data_length &&
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0d505e59214d..d1d484d4a06a 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,10 +12,6 @@
12 * For more information about Generic Hardware Error Source, please 12 * For more information about Generic Hardware Error Source, please
13 * refer to ACPI Specification version 4.0, section 17.3.2.6 13 * refer to ACPI Specification version 4.0, section 17.3.2.6
14 * 14 *
15 * Now, only SCI notification type and memory errors are
16 * supported. More notification type and hardware error type will be
17 * added later.
18 *
19 * Copyright 2010 Intel Corp. 15 * Copyright 2010 Intel Corp.
20 * Author: Huang Ying <ying.huang@intel.com> 16 * Author: Huang Ying <ying.huang@intel.com>
21 * 17 *
@@ -39,14 +35,18 @@
39#include <linux/acpi.h> 35#include <linux/acpi.h>
40#include <linux/io.h> 36#include <linux/io.h>
41#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/timer.h>
42#include <linux/cper.h> 39#include <linux/cper.h>
43#include <linux/kdebug.h> 40#include <linux/kdebug.h>
44#include <linux/platform_device.h> 41#include <linux/platform_device.h>
45#include <linux/mutex.h> 42#include <linux/mutex.h>
43#include <linux/ratelimit.h>
44#include <linux/vmalloc.h>
46#include <acpi/apei.h> 45#include <acpi/apei.h>
47#include <acpi/atomicio.h> 46#include <acpi/atomicio.h>
48#include <acpi/hed.h> 47#include <acpi/hed.h>
49#include <asm/mce.h> 48#include <asm/mce.h>
49#include <asm/tlbflush.h>
50 50
51#include "apei-internal.h" 51#include "apei-internal.h"
52 52
@@ -55,42 +55,131 @@
55#define GHES_ESTATUS_MAX_SIZE 65536 55#define GHES_ESTATUS_MAX_SIZE 65536
56 56
57/* 57/*
58 * One struct ghes is created for each generic hardware error 58 * One struct ghes is created for each generic hardware error source.
59 * source.
60 *
61 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI 59 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
62 * handler. Handler for one generic hardware error source is only 60 * handler.
63 * triggered after the previous one is done. So handler can uses
64 * struct ghes without locking.
65 * 61 *
66 * estatus: memory buffer for error status block, allocated during 62 * estatus: memory buffer for error status block, allocated during
67 * HEST parsing. 63 * HEST parsing.
68 */ 64 */
69#define GHES_TO_CLEAR 0x0001 65#define GHES_TO_CLEAR 0x0001
66#define GHES_EXITING 0x0002
70 67
71struct ghes { 68struct ghes {
72 struct acpi_hest_generic *generic; 69 struct acpi_hest_generic *generic;
73 struct acpi_hest_generic_status *estatus; 70 struct acpi_hest_generic_status *estatus;
74 struct list_head list;
75 u64 buffer_paddr; 71 u64 buffer_paddr;
76 unsigned long flags; 72 unsigned long flags;
73 union {
74 struct list_head list;
75 struct timer_list timer;
76 unsigned int irq;
77 };
77}; 78};
78 79
80static int ghes_panic_timeout __read_mostly = 30;
81
79/* 82/*
80 * Error source lists, one list for each notification method. The 83 * All error sources notified with SCI shares one notifier function,
81 * members in lists are struct ghes. 84 * so they need to be linked and checked one by one. This is applied
85 * to NMI too.
82 * 86 *
83 * The list members are only added in HEST parsing and deleted during 87 * RCU is used for these lists, so ghes_list_mutex is only used for
84 * module_exit, that is, single-threaded. So no lock is needed for 88 * list changing, not for traversing.
85 * that.
86 *
87 * But the mutual exclusion is needed between members adding/deleting
88 * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is
89 * used for that.
90 */ 89 */
91static LIST_HEAD(ghes_sci); 90static LIST_HEAD(ghes_sci);
91static LIST_HEAD(ghes_nmi);
92static DEFINE_MUTEX(ghes_list_mutex); 92static DEFINE_MUTEX(ghes_list_mutex);
93 93
94/*
95 * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
96 * mutual exclusion.
97 */
98static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
99
100/*
101 * Because the memory area used to transfer hardware error information
102 * from BIOS to Linux can be determined only in NMI, IRQ or timer
103 * handler, but general ioremap can not be used in atomic context, so
104 * a special version of atomic ioremap is implemented for that.
105 */
106
107/*
108 * Two virtual pages are used, one for NMI context, the other for
109 * IRQ/PROCESS context
110 */
111#define GHES_IOREMAP_PAGES 2
112#define GHES_IOREMAP_NMI_PAGE(base) (base)
113#define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE)
114
115/* virtual memory area for atomic ioremap */
116static struct vm_struct *ghes_ioremap_area;
117/*
118 * These 2 spinlock is used to prevent atomic ioremap virtual memory
119 * area from being mapped simultaneously.
120 */
121static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
122static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
123
124static int ghes_ioremap_init(void)
125{
126 ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
127 VM_IOREMAP, VMALLOC_START, VMALLOC_END);
128 if (!ghes_ioremap_area) {
129 pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
130 return -ENOMEM;
131 }
132
133 return 0;
134}
135
136static void ghes_ioremap_exit(void)
137{
138 free_vm_area(ghes_ioremap_area);
139}
140
141static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
142{
143 unsigned long vaddr;
144
145 vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
146 ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
147 pfn << PAGE_SHIFT, PAGE_KERNEL);
148
149 return (void __iomem *)vaddr;
150}
151
152static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
153{
154 unsigned long vaddr;
155
156 vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
157 ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
158 pfn << PAGE_SHIFT, PAGE_KERNEL);
159
160 return (void __iomem *)vaddr;
161}
162
163static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
164{
165 unsigned long vaddr = (unsigned long __force)vaddr_ptr;
166 void *base = ghes_ioremap_area->addr;
167
168 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
169 unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
170 __flush_tlb_one(vaddr);
171}
172
173static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
174{
175 unsigned long vaddr = (unsigned long __force)vaddr_ptr;
176 void *base = ghes_ioremap_area->addr;
177
178 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
179 unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
180 __flush_tlb_one(vaddr);
181}
182
94static struct ghes *ghes_new(struct acpi_hest_generic *generic) 183static struct ghes *ghes_new(struct acpi_hest_generic *generic)
95{ 184{
96 struct ghes *ghes; 185 struct ghes *ghes;
@@ -101,7 +190,6 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
101 if (!ghes) 190 if (!ghes)
102 return ERR_PTR(-ENOMEM); 191 return ERR_PTR(-ENOMEM);
103 ghes->generic = generic; 192 ghes->generic = generic;
104 INIT_LIST_HEAD(&ghes->list);
105 rc = acpi_pre_map_gar(&generic->error_status_address); 193 rc = acpi_pre_map_gar(&generic->error_status_address);
106 if (rc) 194 if (rc)
107 goto err_free; 195 goto err_free;
@@ -158,22 +246,41 @@ static inline int ghes_severity(int severity)
158 } 246 }
159} 247}
160 248
161/* SCI handler run in work queue, so ioremap can be used here */ 249static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
162static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, 250 int from_phys)
163 int from_phys)
164{ 251{
165 void *vaddr; 252 void __iomem *vaddr;
166 253 unsigned long flags = 0;
167 vaddr = ioremap_cache(paddr, len); 254 int in_nmi = in_nmi();
168 if (!vaddr) 255 u64 offset;
169 return -ENOMEM; 256 u32 trunk;
170 if (from_phys) 257
171 memcpy(buffer, vaddr, len); 258 while (len > 0) {
172 else 259 offset = paddr - (paddr & PAGE_MASK);
173 memcpy(vaddr, buffer, len); 260 if (in_nmi) {
174 iounmap(vaddr); 261 raw_spin_lock(&ghes_ioremap_lock_nmi);
175 262 vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
176 return 0; 263 } else {
264 spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
265 vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
266 }
267 trunk = PAGE_SIZE - offset;
268 trunk = min(trunk, len);
269 if (from_phys)
270 memcpy_fromio(buffer, vaddr + offset, trunk);
271 else
272 memcpy_toio(vaddr + offset, buffer, trunk);
273 len -= trunk;
274 paddr += trunk;
275 buffer += trunk;
276 if (in_nmi) {
277 ghes_iounmap_nmi(vaddr);
278 raw_spin_unlock(&ghes_ioremap_lock_nmi);
279 } else {
280 ghes_iounmap_irq(vaddr);
281 spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
282 }
283 }
177} 284}
178 285
179static int ghes_read_estatus(struct ghes *ghes, int silent) 286static int ghes_read_estatus(struct ghes *ghes, int silent)
@@ -194,10 +301,8 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
194 if (!buf_paddr) 301 if (!buf_paddr)
195 return -ENOENT; 302 return -ENOENT;
196 303
197 rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, 304 ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
198 sizeof(*ghes->estatus), 1); 305 sizeof(*ghes->estatus), 1);
199 if (rc)
200 return rc;
201 if (!ghes->estatus->block_status) 306 if (!ghes->estatus->block_status)
202 return -ENOENT; 307 return -ENOENT;
203 308
@@ -212,17 +317,15 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
212 goto err_read_block; 317 goto err_read_block;
213 if (apei_estatus_check_header(ghes->estatus)) 318 if (apei_estatus_check_header(ghes->estatus))
214 goto err_read_block; 319 goto err_read_block;
215 rc = ghes_copy_tofrom_phys(ghes->estatus + 1, 320 ghes_copy_tofrom_phys(ghes->estatus + 1,
216 buf_paddr + sizeof(*ghes->estatus), 321 buf_paddr + sizeof(*ghes->estatus),
217 len - sizeof(*ghes->estatus), 1); 322 len - sizeof(*ghes->estatus), 1);
218 if (rc)
219 return rc;
220 if (apei_estatus_check(ghes->estatus)) 323 if (apei_estatus_check(ghes->estatus))
221 goto err_read_block; 324 goto err_read_block;
222 rc = 0; 325 rc = 0;
223 326
224err_read_block: 327err_read_block:
225 if (rc && !silent) 328 if (rc && !silent && printk_ratelimit())
226 pr_warning(FW_WARN GHES_PFX 329 pr_warning(FW_WARN GHES_PFX
227 "Failed to read error status block!\n"); 330 "Failed to read error status block!\n");
228 return rc; 331 return rc;
@@ -255,11 +358,26 @@ static void ghes_do_proc(struct ghes *ghes)
255 } 358 }
256#endif 359#endif
257 } 360 }
361}
258 362
259 if (!processed && printk_ratelimit()) 363static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
260 pr_warning(GHES_PFX 364{
261 "Unknown error record from generic hardware error source: %d\n", 365 /* Not more than 2 messages every 5 seconds */
262 ghes->generic->header.source_id); 366 static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
367
368 if (pfx == NULL) {
369 if (ghes_severity(ghes->estatus->error_severity) <=
370 GHES_SEV_CORRECTED)
371 pfx = KERN_WARNING HW_ERR;
372 else
373 pfx = KERN_ERR HW_ERR;
374 }
375 if (__ratelimit(&ratelimit)) {
376 printk(
377 "%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
378 pfx, ghes->generic->header.source_id);
379 apei_estatus_print(pfx, ghes->estatus);
380 }
263} 381}
264 382
265static int ghes_proc(struct ghes *ghes) 383static int ghes_proc(struct ghes *ghes)
@@ -269,6 +387,7 @@ static int ghes_proc(struct ghes *ghes)
269 rc = ghes_read_estatus(ghes, 0); 387 rc = ghes_read_estatus(ghes, 0);
270 if (rc) 388 if (rc)
271 goto out; 389 goto out;
390 ghes_print_estatus(NULL, ghes);
272 ghes_do_proc(ghes); 391 ghes_do_proc(ghes);
273 392
274out: 393out:
@@ -276,6 +395,42 @@ out:
276 return 0; 395 return 0;
277} 396}
278 397
398static void ghes_add_timer(struct ghes *ghes)
399{
400 struct acpi_hest_generic *g = ghes->generic;
401 unsigned long expire;
402
403 if (!g->notify.poll_interval) {
404 pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
405 g->header.source_id);
406 return;
407 }
408 expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
409 ghes->timer.expires = round_jiffies_relative(expire);
410 add_timer(&ghes->timer);
411}
412
413static void ghes_poll_func(unsigned long data)
414{
415 struct ghes *ghes = (void *)data;
416
417 ghes_proc(ghes);
418 if (!(ghes->flags & GHES_EXITING))
419 ghes_add_timer(ghes);
420}
421
422static irqreturn_t ghes_irq_func(int irq, void *data)
423{
424 struct ghes *ghes = data;
425 int rc;
426
427 rc = ghes_proc(ghes);
428 if (rc)
429 return IRQ_NONE;
430
431 return IRQ_HANDLED;
432}
433
279static int ghes_notify_sci(struct notifier_block *this, 434static int ghes_notify_sci(struct notifier_block *this,
280 unsigned long event, void *data) 435 unsigned long event, void *data)
281{ 436{
@@ -292,10 +447,63 @@ static int ghes_notify_sci(struct notifier_block *this,
292 return ret; 447 return ret;
293} 448}
294 449
450static int ghes_notify_nmi(struct notifier_block *this,
451 unsigned long cmd, void *data)
452{
453 struct ghes *ghes, *ghes_global = NULL;
454 int sev, sev_global = -1;
455 int ret = NOTIFY_DONE;
456
457 if (cmd != DIE_NMI)
458 return ret;
459
460 raw_spin_lock(&ghes_nmi_lock);
461 list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
462 if (ghes_read_estatus(ghes, 1)) {
463 ghes_clear_estatus(ghes);
464 continue;
465 }
466 sev = ghes_severity(ghes->estatus->error_severity);
467 if (sev > sev_global) {
468 sev_global = sev;
469 ghes_global = ghes;
470 }
471 ret = NOTIFY_STOP;
472 }
473
474 if (ret == NOTIFY_DONE)
475 goto out;
476
477 if (sev_global >= GHES_SEV_PANIC) {
478 oops_begin();
479 ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
480 /* reboot to log the error! */
481 if (panic_timeout == 0)
482 panic_timeout = ghes_panic_timeout;
483 panic("Fatal hardware error!");
484 }
485
486 list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
487 if (!(ghes->flags & GHES_TO_CLEAR))
488 continue;
489 /* Do not print estatus because printk is not NMI safe */
490 ghes_do_proc(ghes);
491 ghes_clear_estatus(ghes);
492 }
493
494out:
495 raw_spin_unlock(&ghes_nmi_lock);
496 return ret;
497}
498
295static struct notifier_block ghes_notifier_sci = { 499static struct notifier_block ghes_notifier_sci = {
296 .notifier_call = ghes_notify_sci, 500 .notifier_call = ghes_notify_sci,
297}; 501};
298 502
503static struct notifier_block ghes_notifier_nmi = {
504 .notifier_call = ghes_notify_nmi,
505};
506
299static int __devinit ghes_probe(struct platform_device *ghes_dev) 507static int __devinit ghes_probe(struct platform_device *ghes_dev)
300{ 508{
301 struct acpi_hest_generic *generic; 509 struct acpi_hest_generic *generic;
@@ -306,18 +514,27 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
306 if (!generic->enabled) 514 if (!generic->enabled)
307 return -ENODEV; 515 return -ENODEV;
308 516
309 if (generic->error_block_length < 517 switch (generic->notify.type) {
310 sizeof(struct acpi_hest_generic_status)) { 518 case ACPI_HEST_NOTIFY_POLLED:
311 pr_warning(FW_BUG GHES_PFX 519 case ACPI_HEST_NOTIFY_EXTERNAL:
312"Invalid error block length: %u for generic hardware error source: %d\n", 520 case ACPI_HEST_NOTIFY_SCI:
313 generic->error_block_length, 521 case ACPI_HEST_NOTIFY_NMI:
522 break;
523 case ACPI_HEST_NOTIFY_LOCAL:
524 pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
314 generic->header.source_id); 525 generic->header.source_id);
315 goto err; 526 goto err;
527 default:
528 pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
529 generic->notify.type, generic->header.source_id);
530 goto err;
316 } 531 }
317 if (generic->records_to_preallocate == 0) { 532
318 pr_warning(FW_BUG GHES_PFX 533 rc = -EIO;
319"Invalid records to preallocate: %u for generic hardware error source: %d\n", 534 if (generic->error_block_length <
320 generic->records_to_preallocate, 535 sizeof(struct acpi_hest_generic_status)) {
536 pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
537 generic->error_block_length,
321 generic->header.source_id); 538 generic->header.source_id);
322 goto err; 539 goto err;
323 } 540 }
@@ -327,38 +544,43 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
327 ghes = NULL; 544 ghes = NULL;
328 goto err; 545 goto err;
329 } 546 }
330 if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) { 547 switch (generic->notify.type) {
548 case ACPI_HEST_NOTIFY_POLLED:
549 ghes->timer.function = ghes_poll_func;
550 ghes->timer.data = (unsigned long)ghes;
551 init_timer_deferrable(&ghes->timer);
552 ghes_add_timer(ghes);
553 break;
554 case ACPI_HEST_NOTIFY_EXTERNAL:
555 /* External interrupt vector is GSI */
556 if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
557 pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
558 generic->header.source_id);
559 goto err;
560 }
561 if (request_irq(ghes->irq, ghes_irq_func,
562 0, "GHES IRQ", ghes)) {
563 pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
564 generic->header.source_id);
565 goto err;
566 }
567 break;
568 case ACPI_HEST_NOTIFY_SCI:
331 mutex_lock(&ghes_list_mutex); 569 mutex_lock(&ghes_list_mutex);
332 if (list_empty(&ghes_sci)) 570 if (list_empty(&ghes_sci))
333 register_acpi_hed_notifier(&ghes_notifier_sci); 571 register_acpi_hed_notifier(&ghes_notifier_sci);
334 list_add_rcu(&ghes->list, &ghes_sci); 572 list_add_rcu(&ghes->list, &ghes_sci);
335 mutex_unlock(&ghes_list_mutex); 573 mutex_unlock(&ghes_list_mutex);
336 } else { 574 break;
337 unsigned char *notify = NULL; 575 case ACPI_HEST_NOTIFY_NMI:
338 576 mutex_lock(&ghes_list_mutex);
339 switch (generic->notify.type) { 577 if (list_empty(&ghes_nmi))
340 case ACPI_HEST_NOTIFY_POLLED: 578 register_die_notifier(&ghes_notifier_nmi);
341 notify = "POLL"; 579 list_add_rcu(&ghes->list, &ghes_nmi);
342 break; 580 mutex_unlock(&ghes_list_mutex);
343 case ACPI_HEST_NOTIFY_EXTERNAL: 581 break;
344 case ACPI_HEST_NOTIFY_LOCAL: 582 default:
345 notify = "IRQ"; 583 BUG();
346 break;
347 case ACPI_HEST_NOTIFY_NMI:
348 notify = "NMI";
349 break;
350 }
351 if (notify) {
352 pr_warning(GHES_PFX
353"Generic hardware error source: %d notified via %s is not supported!\n",
354 generic->header.source_id, notify);
355 } else {
356 pr_warning(FW_WARN GHES_PFX
357"Unknown notification type: %u for generic hardware error source: %d\n",
358 generic->notify.type, generic->header.source_id);
359 }
360 rc = -ENODEV;
361 goto err;
362 } 584 }
363 platform_set_drvdata(ghes_dev, ghes); 585 platform_set_drvdata(ghes_dev, ghes);
364 586
@@ -379,7 +601,14 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
379 ghes = platform_get_drvdata(ghes_dev); 601 ghes = platform_get_drvdata(ghes_dev);
380 generic = ghes->generic; 602 generic = ghes->generic;
381 603
604 ghes->flags |= GHES_EXITING;
382 switch (generic->notify.type) { 605 switch (generic->notify.type) {
606 case ACPI_HEST_NOTIFY_POLLED:
607 del_timer_sync(&ghes->timer);
608 break;
609 case ACPI_HEST_NOTIFY_EXTERNAL:
610 free_irq(ghes->irq, ghes);
611 break;
383 case ACPI_HEST_NOTIFY_SCI: 612 case ACPI_HEST_NOTIFY_SCI:
384 mutex_lock(&ghes_list_mutex); 613 mutex_lock(&ghes_list_mutex);
385 list_del_rcu(&ghes->list); 614 list_del_rcu(&ghes->list);
@@ -387,12 +616,23 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
387 unregister_acpi_hed_notifier(&ghes_notifier_sci); 616 unregister_acpi_hed_notifier(&ghes_notifier_sci);
388 mutex_unlock(&ghes_list_mutex); 617 mutex_unlock(&ghes_list_mutex);
389 break; 618 break;
619 case ACPI_HEST_NOTIFY_NMI:
620 mutex_lock(&ghes_list_mutex);
621 list_del_rcu(&ghes->list);
622 if (list_empty(&ghes_nmi))
623 unregister_die_notifier(&ghes_notifier_nmi);
624 mutex_unlock(&ghes_list_mutex);
625 /*
626 * To synchronize with NMI handler, ghes can only be
627 * freed after NMI handler finishes.
628 */
629 synchronize_rcu();
630 break;
390 default: 631 default:
391 BUG(); 632 BUG();
392 break; 633 break;
393 } 634 }
394 635
395 synchronize_rcu();
396 ghes_fini(ghes); 636 ghes_fini(ghes);
397 kfree(ghes); 637 kfree(ghes);
398 638
@@ -412,6 +652,8 @@ static struct platform_driver ghes_platform_driver = {
412 652
413static int __init ghes_init(void) 653static int __init ghes_init(void)
414{ 654{
655 int rc;
656
415 if (acpi_disabled) 657 if (acpi_disabled)
416 return -ENODEV; 658 return -ENODEV;
417 659
@@ -420,12 +662,25 @@ static int __init ghes_init(void)
420 return -EINVAL; 662 return -EINVAL;
421 } 663 }
422 664
423 return platform_driver_register(&ghes_platform_driver); 665 rc = ghes_ioremap_init();
666 if (rc)
667 goto err;
668
669 rc = platform_driver_register(&ghes_platform_driver);
670 if (rc)
671 goto err_ioremap_exit;
672
673 return 0;
674err_ioremap_exit:
675 ghes_ioremap_exit();
676err:
677 return rc;
424} 678}
425 679
426static void __exit ghes_exit(void) 680static void __exit ghes_exit(void)
427{ 681{
428 platform_driver_unregister(&ghes_platform_driver); 682 platform_driver_unregister(&ghes_platform_driver);
683 ghes_ioremap_exit();
429} 684}
430 685
431module_init(ghes_init); 686module_init(ghes_init);
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index daa7bc63f1d4..abda3786a5d7 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -195,24 +195,24 @@ static int __init setup_hest_disable(char *str)
195 195
196__setup("hest_disable", setup_hest_disable); 196__setup("hest_disable", setup_hest_disable);
197 197
198static int __init hest_init(void) 198void __init acpi_hest_init(void)
199{ 199{
200 acpi_status status; 200 acpi_status status;
201 int rc = -ENODEV; 201 int rc = -ENODEV;
202 unsigned int ghes_count = 0; 202 unsigned int ghes_count = 0;
203 203
204 if (acpi_disabled)
205 goto err;
206
207 if (hest_disable) { 204 if (hest_disable) {
208 pr_info(HEST_PFX "HEST tabling parsing is disabled.\n"); 205 pr_info(HEST_PFX "Table parsing disabled.\n");
209 goto err; 206 return;
210 } 207 }
211 208
209 if (acpi_disabled)
210 goto err;
211
212 status = acpi_get_table(ACPI_SIG_HEST, 0, 212 status = acpi_get_table(ACPI_SIG_HEST, 0,
213 (struct acpi_table_header **)&hest_tab); 213 (struct acpi_table_header **)&hest_tab);
214 if (status == AE_NOT_FOUND) { 214 if (status == AE_NOT_FOUND) {
215 pr_info(HEST_PFX "Table is not found!\n"); 215 pr_info(HEST_PFX "Table not found.\n");
216 goto err; 216 goto err;
217 } else if (ACPI_FAILURE(status)) { 217 } else if (ACPI_FAILURE(status)) {
218 const char *msg = acpi_format_exception(status); 218 const char *msg = acpi_format_exception(status);
@@ -226,15 +226,11 @@ static int __init hest_init(void)
226 goto err; 226 goto err;
227 227
228 rc = hest_ghes_dev_register(ghes_count); 228 rc = hest_ghes_dev_register(ghes_count);
229 if (rc) 229 if (!rc) {
230 goto err; 230 pr_info(HEST_PFX "Table parsing has been initialized.\n");
231 231 return;
232 pr_info(HEST_PFX "HEST table parsing is initialized.\n"); 232 }
233 233
234 return 0;
235err: 234err:
236 hest_disable = 1; 235 hest_disable = 1;
237 return rc;
238} 236}
239
240subsys_initcall(hest_init);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 95649d373071..ac1a599f5147 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -631,6 +631,17 @@ static int acpi_battery_update(struct acpi_battery *battery)
631 return result; 631 return result;
632} 632}
633 633
634static void acpi_battery_refresh(struct acpi_battery *battery)
635{
636 if (!battery->bat.dev)
637 return;
638
639 acpi_battery_get_info(battery);
640 /* The battery may have changed its reporting units. */
641 sysfs_remove_battery(battery);
642 sysfs_add_battery(battery);
643}
644
634/* -------------------------------------------------------------------------- 645/* --------------------------------------------------------------------------
635 FS Interface (/proc) 646 FS Interface (/proc)
636 -------------------------------------------------------------------------- */ 647 -------------------------------------------------------------------------- */
@@ -868,6 +879,8 @@ static int acpi_battery_add_fs(struct acpi_device *device)
868 struct proc_dir_entry *entry = NULL; 879 struct proc_dir_entry *entry = NULL;
869 int i; 880 int i;
870 881
882 printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
883 " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
871 if (!acpi_device_dir(device)) { 884 if (!acpi_device_dir(device)) {
872 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 885 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
873 acpi_battery_dir); 886 acpi_battery_dir);
@@ -914,6 +927,8 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
914 if (!battery) 927 if (!battery)
915 return; 928 return;
916 old = battery->bat.dev; 929 old = battery->bat.dev;
930 if (event == ACPI_BATTERY_NOTIFY_INFO)
931 acpi_battery_refresh(battery);
917 acpi_battery_update(battery); 932 acpi_battery_update(battery);
918 acpi_bus_generate_proc_event(device, event, 933 acpi_bus_generate_proc_event(device, event,
919 acpi_battery_present(battery)); 934 acpi_battery_present(battery));
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d68bd61072bb..7ced61f39492 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -52,22 +52,6 @@ EXPORT_SYMBOL(acpi_root_dir);
52 52
53#define STRUCT_TO_INT(s) (*((int*)&s)) 53#define STRUCT_TO_INT(s) (*((int*)&s))
54 54
55static int set_power_nocheck(const struct dmi_system_id *id)
56{
57 printk(KERN_NOTICE PREFIX "%s detected - "
58 "disable power check in power transition\n", id->ident);
59 acpi_power_nocheck = 1;
60 return 0;
61}
62static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
63 {
64 set_power_nocheck, "HP Pavilion 05", {
65 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
66 DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"),
67 DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL},
68 {},
69};
70
71 55
72#ifdef CONFIG_X86 56#ifdef CONFIG_X86
73static int set_copy_dsdt(const struct dmi_system_id *id) 57static int set_copy_dsdt(const struct dmi_system_id *id)
@@ -196,33 +180,24 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
196 Power Management 180 Power Management
197 -------------------------------------------------------------------------- */ 181 -------------------------------------------------------------------------- */
198 182
199int acpi_bus_get_power(acpi_handle handle, int *state) 183static int __acpi_bus_get_power(struct acpi_device *device, int *state)
200{ 184{
201 int result = 0; 185 int result = 0;
202 acpi_status status = 0; 186 acpi_status status = 0;
203 struct acpi_device *device = NULL;
204 unsigned long long psc = 0; 187 unsigned long long psc = 0;
205 188
206 189 if (!device || !state)
207 result = acpi_bus_get_device(handle, &device); 190 return -EINVAL;
208 if (result)
209 return result;
210 191
211 *state = ACPI_STATE_UNKNOWN; 192 *state = ACPI_STATE_UNKNOWN;
212 193
213 if (!device->flags.power_manageable) { 194 if (device->flags.power_manageable) {
214 /* TBD: Non-recursive algorithm for walking up hierarchy */
215 if (device->parent)
216 *state = device->parent->power.state;
217 else
218 *state = ACPI_STATE_D0;
219 } else {
220 /* 195 /*
221 * Get the device's power state either directly (via _PSC) or 196 * Get the device's power state either directly (via _PSC) or
222 * indirectly (via power resources). 197 * indirectly (via power resources).
223 */ 198 */
224 if (device->power.flags.power_resources) { 199 if (device->power.flags.power_resources) {
225 result = acpi_power_get_inferred_state(device); 200 result = acpi_power_get_inferred_state(device, state);
226 if (result) 201 if (result)
227 return result; 202 return result;
228 } else if (device->power.flags.explicit_get) { 203 } else if (device->power.flags.explicit_get) {
@@ -230,59 +205,33 @@ int acpi_bus_get_power(acpi_handle handle, int *state)
230 NULL, &psc); 205 NULL, &psc);
231 if (ACPI_FAILURE(status)) 206 if (ACPI_FAILURE(status))
232 return -ENODEV; 207 return -ENODEV;
233 device->power.state = (int)psc; 208 *state = (int)psc;
234 } 209 }
235 210 } else {
236 *state = device->power.state; 211 /* TBD: Non-recursive algorithm for walking up hierarchy. */
212 *state = device->parent ?
213 device->parent->power.state : ACPI_STATE_D0;
237 } 214 }
238 215
239 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", 216 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
240 device->pnp.bus_id, device->power.state)); 217 device->pnp.bus_id, *state));
241 218
242 return 0; 219 return 0;
243} 220}
244 221
245EXPORT_SYMBOL(acpi_bus_get_power);
246 222
247int acpi_bus_set_power(acpi_handle handle, int state) 223static int __acpi_bus_set_power(struct acpi_device *device, int state)
248{ 224{
249 int result = 0; 225 int result = 0;
250 acpi_status status = AE_OK; 226 acpi_status status = AE_OK;
251 struct acpi_device *device = NULL;
252 char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' }; 227 char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
253 228
254 229 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
255 result = acpi_bus_get_device(handle, &device);
256 if (result)
257 return result;
258
259 if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
260 return -EINVAL; 230 return -EINVAL;
261 231
262 /* Make sure this is a valid target state */ 232 /* Make sure this is a valid target state */
263 233
264 if (!device->flags.power_manageable) { 234 if (state == device->power.state) {
265 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
266 kobject_name(&device->dev.kobj)));
267 return -ENODEV;
268 }
269 /*
270 * Get device's current power state
271 */
272 if (!acpi_power_nocheck) {
273 /*
274 * Maybe the incorrect power state is returned on the bogus
275 * bios, which is different with the real power state.
276 * For example: the bios returns D0 state and the real power
277 * state is D3. OS expects to set the device to D0 state. In
278 * such case if OS uses the power state returned by the BIOS,
279 * the device can't be transisted to the correct power state.
280 * So if the acpi_power_nocheck is set, it is unnecessary to
281 * get the power state by calling acpi_bus_get_power.
282 */
283 acpi_bus_get_power(device->handle, &device->power.state);
284 }
285 if ((state == device->power.state) && !device->flags.force_power_state) {
286 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", 235 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
287 state)); 236 state));
288 return 0; 237 return 0;
@@ -351,8 +300,75 @@ int acpi_bus_set_power(acpi_handle handle, int state)
351 return result; 300 return result;
352} 301}
353 302
303
304int acpi_bus_set_power(acpi_handle handle, int state)
305{
306 struct acpi_device *device;
307 int result;
308
309 result = acpi_bus_get_device(handle, &device);
310 if (result)
311 return result;
312
313 if (!device->flags.power_manageable) {
314 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
315 "Device [%s] is not power manageable\n",
316 dev_name(&device->dev)));
317 return -ENODEV;
318 }
319
320 return __acpi_bus_set_power(device, state);
321}
354EXPORT_SYMBOL(acpi_bus_set_power); 322EXPORT_SYMBOL(acpi_bus_set_power);
355 323
324
325int acpi_bus_init_power(struct acpi_device *device)
326{
327 int state;
328 int result;
329
330 if (!device)
331 return -EINVAL;
332
333 device->power.state = ACPI_STATE_UNKNOWN;
334
335 result = __acpi_bus_get_power(device, &state);
336 if (result)
337 return result;
338
339 if (device->power.flags.power_resources)
340 result = acpi_power_on_resources(device, state);
341
342 if (!result)
343 device->power.state = state;
344
345 return result;
346}
347
348
349int acpi_bus_update_power(acpi_handle handle, int *state_p)
350{
351 struct acpi_device *device;
352 int state;
353 int result;
354
355 result = acpi_bus_get_device(handle, &device);
356 if (result)
357 return result;
358
359 result = __acpi_bus_get_power(device, &state);
360 if (result)
361 return result;
362
363 result = __acpi_bus_set_power(device, state);
364 if (!result && state_p)
365 *state_p = state;
366
367 return result;
368}
369EXPORT_SYMBOL_GPL(acpi_bus_update_power);
370
371
356bool acpi_bus_power_manageable(acpi_handle handle) 372bool acpi_bus_power_manageable(acpi_handle handle)
357{ 373{
358 struct acpi_device *device; 374 struct acpi_device *device;
@@ -1023,15 +1039,8 @@ static int __init acpi_init(void)
1023 if (acpi_disabled) 1039 if (acpi_disabled)
1024 return result; 1040 return result;
1025 1041
1026 /*
1027 * If the laptop falls into the DMI check table, the power state check
1028 * will be disabled in the course of device power transition.
1029 */
1030 dmi_check_system(power_nocheck_dmi_table);
1031
1032 acpi_scan_init(); 1042 acpi_scan_init();
1033 acpi_ec_init(); 1043 acpi_ec_init();
1034 acpi_power_init();
1035 acpi_debugfs_init(); 1044 acpi_debugfs_init();
1036 acpi_sleep_proc_init(); 1045 acpi_sleep_proc_init();
1037 acpi_wakeup_device_init(); 1046 acpi_wakeup_device_init();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 71ef9cd0735f..76bbb78a5ad9 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -279,6 +279,9 @@ static int acpi_lid_send_state(struct acpi_device *device)
279 input_report_switch(button->input, SW_LID, !state); 279 input_report_switch(button->input, SW_LID, !state);
280 input_sync(button->input); 280 input_sync(button->input);
281 281
282 if (state)
283 pm_wakeup_event(&device->dev, 0);
284
282 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); 285 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
283 if (ret == NOTIFY_DONE) 286 if (ret == NOTIFY_DONE)
284 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, 287 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
@@ -314,6 +317,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
314 input_sync(input); 317 input_sync(input);
315 input_report_key(input, keycode, 0); 318 input_report_key(input, keycode, 0);
316 input_sync(input); 319 input_sync(input);
320
321 pm_wakeup_event(&device->dev, 0);
317 } 322 }
318 323
319 acpi_bus_generate_proc_event(device, event, ++button->pushed); 324 acpi_bus_generate_proc_event(device, event, ++button->pushed);
@@ -426,7 +431,7 @@ static int acpi_button_add(struct acpi_device *device)
426 acpi_enable_gpe(device->wakeup.gpe_device, 431 acpi_enable_gpe(device->wakeup.gpe_device,
427 device->wakeup.gpe_number); 432 device->wakeup.gpe_number);
428 device->wakeup.run_wake_count++; 433 device->wakeup.run_wake_count++;
429 device->wakeup.state.enabled = 1; 434 device_set_wakeup_enable(&device->dev, true);
430 } 435 }
431 436
432 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); 437 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
@@ -449,7 +454,7 @@ static int acpi_button_remove(struct acpi_device *device, int type)
449 acpi_disable_gpe(device->wakeup.gpe_device, 454 acpi_disable_gpe(device->wakeup.gpe_device,
450 device->wakeup.gpe_number); 455 device->wakeup.gpe_number);
451 device->wakeup.run_wake_count--; 456 device->wakeup.run_wake_count--;
452 device->wakeup.state.enabled = 0; 457 device_set_wakeup_enable(&device->dev, false);
453 } 458 }
454 459
455 acpi_button_remove_fs(device); 460 acpi_button_remove_fs(device);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 81514a4918cc..1864ad3cf895 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -725,7 +725,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
725 complete_dock(ds); 725 complete_dock(ds);
726 dock_event(ds, event, DOCK_EVENT); 726 dock_event(ds, event, DOCK_EVENT);
727 dock_lock(ds, 1); 727 dock_lock(ds, 1);
728 acpi_update_gpes(); 728 acpi_update_all_gpes();
729 break; 729 break;
730 } 730 }
731 if (dock_present(ds) || dock_in_progress(ds)) 731 if (dock_present(ds) || dock_in_progress(ds))
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 302b31ed31f1..fa848c4116a8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -606,7 +606,8 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
606 return 0; 606 return 0;
607} 607}
608 608
609static u32 acpi_ec_gpe_handler(void *data) 609static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
610 u32 gpe_number, void *data)
610{ 611{
611 struct acpi_ec *ec = data; 612 struct acpi_ec *ec = data;
612 613
@@ -618,7 +619,7 @@ static u32 acpi_ec_gpe_handler(void *data)
618 wake_up(&ec->wait); 619 wake_up(&ec->wait);
619 ec_check_sci(ec, acpi_ec_read_status(ec)); 620 ec_check_sci(ec, acpi_ec_read_status(ec));
620 } 621 }
621 return ACPI_INTERRUPT_HANDLED; 622 return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
622} 623}
623 624
624/* -------------------------------------------------------------------------- 625/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 60049080c869..467479f07c1f 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -86,7 +86,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
86 if (!device) 86 if (!device)
87 return -EINVAL; 87 return -EINVAL;
88 88
89 result = acpi_bus_get_power(device->handle, &acpi_state); 89 result = acpi_bus_update_power(device->handle, &acpi_state);
90 if (result) 90 if (result)
91 return result; 91 return result;
92 92
@@ -123,7 +123,6 @@ static struct thermal_cooling_device_ops fan_cooling_ops = {
123static int acpi_fan_add(struct acpi_device *device) 123static int acpi_fan_add(struct acpi_device *device)
124{ 124{
125 int result = 0; 125 int result = 0;
126 int state = 0;
127 struct thermal_cooling_device *cdev; 126 struct thermal_cooling_device *cdev;
128 127
129 if (!device) 128 if (!device)
@@ -132,16 +131,12 @@ static int acpi_fan_add(struct acpi_device *device)
132 strcpy(acpi_device_name(device), "Fan"); 131 strcpy(acpi_device_name(device), "Fan");
133 strcpy(acpi_device_class(device), ACPI_FAN_CLASS); 132 strcpy(acpi_device_class(device), ACPI_FAN_CLASS);
134 133
135 result = acpi_bus_get_power(device->handle, &state); 134 result = acpi_bus_update_power(device->handle, NULL);
136 if (result) { 135 if (result) {
137 printk(KERN_ERR PREFIX "Reading power state\n"); 136 printk(KERN_ERR PREFIX "Setting initial power state\n");
138 goto end; 137 goto end;
139 } 138 }
140 139
141 device->flags.force_power_state = 1;
142 acpi_bus_set_power(device->handle, state);
143 device->flags.force_power_state = 0;
144
145 cdev = thermal_cooling_device_register("Fan", device, 140 cdev = thermal_cooling_device_register("Fan", device,
146 &fan_cooling_ops); 141 &fan_cooling_ops);
147 if (IS_ERR(cdev)) { 142 if (IS_ERR(cdev)) {
@@ -200,22 +195,14 @@ static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
200 195
201static int acpi_fan_resume(struct acpi_device *device) 196static int acpi_fan_resume(struct acpi_device *device)
202{ 197{
203 int result = 0; 198 int result;
204 int power_state = 0;
205 199
206 if (!device) 200 if (!device)
207 return -EINVAL; 201 return -EINVAL;
208 202
209 result = acpi_bus_get_power(device->handle, &power_state); 203 result = acpi_bus_update_power(device->handle, NULL);
210 if (result) { 204 if (result)
211 printk(KERN_ERR PREFIX 205 printk(KERN_ERR PREFIX "Error updating fan power state\n");
212 "Error reading fan power state\n");
213 return result;
214 }
215
216 device->flags.force_power_state = 1;
217 acpi_bus_set_power(device->handle, power_state);
218 device->flags.force_power_state = 0;
219 206
220 return result; 207 return result;
221} 208}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 78b0164c35b2..7c47ed55e528 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -167,11 +167,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
167 "firmware_node"); 167 "firmware_node");
168 ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, 168 ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
169 "physical_node"); 169 "physical_node");
170 if (acpi_dev->wakeup.flags.valid) { 170 if (acpi_dev->wakeup.flags.valid)
171 device_set_wakeup_capable(dev, true); 171 device_set_wakeup_capable(dev, true);
172 device_set_wakeup_enable(dev,
173 acpi_dev->wakeup.state.enabled);
174 }
175 } 172 }
176 173
177 return 0; 174 return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a212bfeddf8c..b1cc81a0431b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,9 +41,10 @@ static inline int acpi_debugfs_init(void) { return 0; }
41int acpi_power_init(void); 41int acpi_power_init(void);
42int acpi_device_sleep_wake(struct acpi_device *dev, 42int acpi_device_sleep_wake(struct acpi_device *dev,
43 int enable, int sleep_state, int dev_state); 43 int enable, int sleep_state, int dev_state);
44int acpi_power_get_inferred_state(struct acpi_device *device); 44int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
45int acpi_power_on_resources(struct acpi_device *device, int state);
45int acpi_power_transition(struct acpi_device *device, int state); 46int acpi_power_transition(struct acpi_device *device, int state);
46extern int acpi_power_nocheck; 47int acpi_bus_init_power(struct acpi_device *device);
47 48
48int acpi_wakeup_device_init(void); 49int acpi_wakeup_device_init(void);
49void acpi_early_processor_set_pdc(void); 50void acpi_early_processor_set_pdc(void);
@@ -82,8 +83,16 @@ extern int acpi_sleep_init(void);
82 83
83#ifdef CONFIG_ACPI_SLEEP 84#ifdef CONFIG_ACPI_SLEEP
84int acpi_sleep_proc_init(void); 85int acpi_sleep_proc_init(void);
86int suspend_nvs_alloc(void);
87void suspend_nvs_free(void);
88int suspend_nvs_save(void);
89void suspend_nvs_restore(void);
85#else 90#else
86static inline int acpi_sleep_proc_init(void) { return 0; } 91static inline int acpi_sleep_proc_init(void) { return 0; }
92static inline int suspend_nvs_alloc(void) { return 0; }
93static inline void suspend_nvs_free(void) {}
94static inline int suspend_nvs_save(void) { return 0; }
95static inline void suspend_nvs_restore(void) {}
87#endif 96#endif
88 97
89#endif /* _ACPI_INTERNAL_H_ */ 98#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d9926afec110..5eb25eb3ea48 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -275,23 +275,19 @@ acpi_table_parse_srat(enum acpi_srat_type id,
275int __init acpi_numa_init(void) 275int __init acpi_numa_init(void)
276{ 276{
277 int ret = 0; 277 int ret = 0;
278 int nr_cpu_entries = nr_cpu_ids;
279 278
280#ifdef CONFIG_X86
281 /* 279 /*
282 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus= 280 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
283 * SRAT cpu entries could have different order with that in MADT. 281 * SRAT cpu entries could have different order with that in MADT.
284 * So go over all cpu entries in SRAT to get apicid to node mapping. 282 * So go over all cpu entries in SRAT to get apicid to node mapping.
285 */ 283 */
286 nr_cpu_entries = MAX_LOCAL_APIC;
287#endif
288 284
289 /* SRAT: Static Resource Affinity Table */ 285 /* SRAT: Static Resource Affinity Table */
290 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { 286 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
291 acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY, 287 acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
292 acpi_parse_x2apic_affinity, nr_cpu_entries); 288 acpi_parse_x2apic_affinity, 0);
293 acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, 289 acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
294 acpi_parse_processor_affinity, nr_cpu_entries); 290 acpi_parse_processor_affinity, 0);
295 ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, 291 ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
296 acpi_parse_memory_affinity, 292 acpi_parse_memory_affinity,
297 NR_NODE_MEMBLKS); 293 NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
new file mode 100644
index 000000000000..fa5a1df42b79
--- /dev/null
+++ b/drivers/acpi/nvs.c
@@ -0,0 +1,145 @@
1/*
2 * nvs.c - Routines for saving and restoring ACPI NVS memory region
3 *
4 * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/io.h>
10#include <linux/kernel.h>
11#include <linux/list.h>
12#include <linux/mm.h>
13#include <linux/slab.h>
14#include <linux/acpi.h>
15#include <linux/acpi_io.h>
16#include <acpi/acpiosxf.h>
17
18/*
19 * Platforms, like ACPI, may want us to save some memory used by them during
20 * suspend and to restore the contents of this memory during the subsequent
21 * resume. The code below implements a mechanism allowing us to do that.
22 */
23
24struct nvs_page {
25 unsigned long phys_start;
26 unsigned int size;
27 void *kaddr;
28 void *data;
29 struct list_head node;
30};
31
32static LIST_HEAD(nvs_list);
33
34/**
35 * suspend_nvs_register - register platform NVS memory region to save
36 * @start - physical address of the region
37 * @size - size of the region
38 *
39 * The NVS region need not be page-aligned (both ends) and we arrange
40 * things so that the data from page-aligned addresses in this region will
41 * be copied into separate RAM pages.
42 */
43int suspend_nvs_register(unsigned long start, unsigned long size)
44{
45 struct nvs_page *entry, *next;
46
47 while (size > 0) {
48 unsigned int nr_bytes;
49
50 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
51 if (!entry)
52 goto Error;
53
54 list_add_tail(&entry->node, &nvs_list);
55 entry->phys_start = start;
56 nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
57 entry->size = (size < nr_bytes) ? size : nr_bytes;
58
59 start += entry->size;
60 size -= entry->size;
61 }
62 return 0;
63
64 Error:
65 list_for_each_entry_safe(entry, next, &nvs_list, node) {
66 list_del(&entry->node);
67 kfree(entry);
68 }
69 return -ENOMEM;
70}
71
72/**
73 * suspend_nvs_free - free data pages allocated for saving NVS regions
74 */
75void suspend_nvs_free(void)
76{
77 struct nvs_page *entry;
78
79 list_for_each_entry(entry, &nvs_list, node)
80 if (entry->data) {
81 free_page((unsigned long)entry->data);
82 entry->data = NULL;
83 if (entry->kaddr) {
84 iounmap(entry->kaddr);
85 entry->kaddr = NULL;
86 }
87 }
88}
89
90/**
91 * suspend_nvs_alloc - allocate memory necessary for saving NVS regions
92 */
93int suspend_nvs_alloc(void)
94{
95 struct nvs_page *entry;
96
97 list_for_each_entry(entry, &nvs_list, node) {
98 entry->data = (void *)__get_free_page(GFP_KERNEL);
99 if (!entry->data) {
100 suspend_nvs_free();
101 return -ENOMEM;
102 }
103 }
104 return 0;
105}
106
107/**
108 * suspend_nvs_save - save NVS memory regions
109 */
110int suspend_nvs_save(void)
111{
112 struct nvs_page *entry;
113
114 printk(KERN_INFO "PM: Saving platform NVS memory\n");
115
116 list_for_each_entry(entry, &nvs_list, node)
117 if (entry->data) {
118 entry->kaddr = acpi_os_ioremap(entry->phys_start,
119 entry->size);
120 if (!entry->kaddr) {
121 suspend_nvs_free();
122 return -ENOMEM;
123 }
124 memcpy(entry->data, entry->kaddr, entry->size);
125 }
126
127 return 0;
128}
129
130/**
131 * suspend_nvs_restore - restore NVS memory regions
132 *
133 * This function is going to be called with interrupts disabled, so it
134 * cannot iounmap the virtual addresses used to access the NVS region.
135 */
136void suspend_nvs_restore(void)
137{
138 struct nvs_page *entry;
139
140 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
141
142 list_for_each_entry(entry, &nvs_list, node)
143 if (entry->data)
144 memcpy(entry->kaddr, entry->data, entry->size);
145}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 055d7b701fff..b0931818cf98 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -38,6 +38,7 @@
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/nmi.h> 39#include <linux/nmi.h>
40#include <linux/acpi.h> 40#include <linux/acpi.h>
41#include <linux/acpi_io.h>
41#include <linux/efi.h> 42#include <linux/efi.h>
42#include <linux/ioport.h> 43#include <linux/ioport.h>
43#include <linux/list.h> 44#include <linux/list.h>
@@ -302,9 +303,10 @@ void __iomem *__init_refok
302acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 303acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
303{ 304{
304 struct acpi_ioremap *map, *tmp_map; 305 struct acpi_ioremap *map, *tmp_map;
305 unsigned long flags, pg_sz; 306 unsigned long flags;
306 void __iomem *virt; 307 void __iomem *virt;
307 phys_addr_t pg_off; 308 acpi_physical_address pg_off;
309 acpi_size pg_sz;
308 310
309 if (phys > ULONG_MAX) { 311 if (phys > ULONG_MAX) {
310 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 312 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
@@ -320,7 +322,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
320 322
321 pg_off = round_down(phys, PAGE_SIZE); 323 pg_off = round_down(phys, PAGE_SIZE);
322 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 324 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
323 virt = ioremap(pg_off, pg_sz); 325 virt = acpi_os_ioremap(pg_off, pg_sz);
324 if (!virt) { 326 if (!virt) {
325 kfree(map); 327 kfree(map);
326 return NULL; 328 return NULL;
@@ -642,7 +644,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
642 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 644 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
643 rcu_read_unlock(); 645 rcu_read_unlock();
644 if (!virt_addr) { 646 if (!virt_addr) {
645 virt_addr = ioremap(phys_addr, size); 647 virt_addr = acpi_os_ioremap(phys_addr, size);
646 unmap = 1; 648 unmap = 1;
647 } 649 }
648 if (!value) 650 if (!value)
@@ -678,7 +680,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
678 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 680 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
679 rcu_read_unlock(); 681 rcu_read_unlock();
680 if (!virt_addr) { 682 if (!virt_addr) {
681 virt_addr = ioremap(phys_addr, size); 683 virt_addr = acpi_os_ioremap(phys_addr, size);
682 unmap = 1; 684 unmap = 1;
683 } 685 }
684 686
@@ -1233,8 +1235,7 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1233int acpi_check_resource_conflict(const struct resource *res) 1235int acpi_check_resource_conflict(const struct resource *res)
1234{ 1236{
1235 struct acpi_res_list *res_list_elem; 1237 struct acpi_res_list *res_list_elem;
1236 int ioport; 1238 int ioport = 0, clash = 0;
1237 int clash = 0;
1238 1239
1239 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1240 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1240 return 0; 1241 return 0;
@@ -1264,9 +1265,13 @@ int acpi_check_resource_conflict(const struct resource *res)
1264 if (clash) { 1265 if (clash) {
1265 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1266 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1266 printk(KERN_WARNING "ACPI: resource %s %pR" 1267 printk(KERN_WARNING "ACPI: resource %s %pR"
1267 " conflicts with ACPI region %s %pR\n", 1268 " conflicts with ACPI region %s "
1269 "[%s 0x%zx-0x%zx]\n",
1268 res->name, res, res_list_elem->name, 1270 res->name, res, res_list_elem->name,
1269 res_list_elem); 1271 (res_list_elem->resource_type ==
1272 ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
1273 (size_t) res_list_elem->start,
1274 (size_t) res_list_elem->end);
1270 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1275 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1271 printk(KERN_NOTICE "ACPI: This conflict may" 1276 printk(KERN_NOTICE "ACPI: This conflict may"
1272 " cause random problems and system" 1277 " cause random problems and system"
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 96668ad09622..85249395623b 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -36,6 +36,7 @@
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <acpi/acpi_bus.h> 37#include <acpi/acpi_bus.h>
38#include <acpi/acpi_drivers.h> 38#include <acpi/acpi_drivers.h>
39#include <acpi/apei.h>
39 40
40#define PREFIX "ACPI: " 41#define PREFIX "ACPI: "
41 42
@@ -47,6 +48,11 @@ static int acpi_pci_root_add(struct acpi_device *device);
47static int acpi_pci_root_remove(struct acpi_device *device, int type); 48static int acpi_pci_root_remove(struct acpi_device *device, int type);
48static int acpi_pci_root_start(struct acpi_device *device); 49static int acpi_pci_root_start(struct acpi_device *device);
49 50
51#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
52 | OSC_ACTIVE_STATE_PWR_SUPPORT \
53 | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
54 | OSC_MSI_SUPPORT)
55
50static const struct acpi_device_id root_device_ids[] = { 56static const struct acpi_device_id root_device_ids[] = {
51 {"PNP0A03", 0}, 57 {"PNP0A03", 0},
52 {"", 0}, 58 {"", 0},
@@ -566,6 +572,33 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
566 if (flags != base_flags) 572 if (flags != base_flags)
567 acpi_pci_osc_support(root, flags); 573 acpi_pci_osc_support(root, flags);
568 574
575 if (!pcie_ports_disabled
576 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
577 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
578 | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
579 | OSC_PCI_EXPRESS_PME_CONTROL;
580
581 if (pci_aer_available()) {
582 if (aer_acpi_firmware_first())
583 dev_dbg(root->bus->bridge,
584 "PCIe errors handled by BIOS.\n");
585 else
586 flags |= OSC_PCI_EXPRESS_AER_CONTROL;
587 }
588
589 dev_info(root->bus->bridge,
590 "Requesting ACPI _OSC control (0x%02x)\n", flags);
591
592 status = acpi_pci_osc_control_set(device->handle, &flags,
593 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
594 if (ACPI_SUCCESS(status))
595 dev_info(root->bus->bridge,
596 "ACPI _OSC control (0x%02x) granted\n", flags);
597 else
598 dev_dbg(root->bus->bridge,
599 "ACPI _OSC request failed (code %d)\n", status);
600 }
601
569 pci_acpi_add_bus_pm_notifier(device, root->bus); 602 pci_acpi_add_bus_pm_notifier(device, root->bus);
570 if (device->wakeup.flags.run_wake) 603 if (device->wakeup.flags.run_wake)
571 device_set_run_wake(root->bus->bridge, true); 604 device_set_run_wake(root->bus->bridge, true);
@@ -600,6 +633,8 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type)
600 633
601static int __init acpi_pci_root_init(void) 634static int __init acpi_pci_root_init(void)
602{ 635{
636 acpi_hest_init();
637
603 if (acpi_pci_disabled) 638 if (acpi_pci_disabled)
604 return 0; 639 return 0;
605 640
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 4c9c2fb5d98f..9ac2a9fa90ff 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -56,9 +56,6 @@ ACPI_MODULE_NAME("power");
56#define ACPI_POWER_RESOURCE_STATE_ON 0x01 56#define ACPI_POWER_RESOURCE_STATE_ON 0x01
57#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF 57#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
58 58
59int acpi_power_nocheck;
60module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
61
62static int acpi_power_add(struct acpi_device *device); 59static int acpi_power_add(struct acpi_device *device);
63static int acpi_power_remove(struct acpi_device *device, int type); 60static int acpi_power_remove(struct acpi_device *device, int type);
64static int acpi_power_resume(struct acpi_device *device); 61static int acpi_power_resume(struct acpi_device *device);
@@ -148,9 +145,8 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
148 145
149static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) 146static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
150{ 147{
151 int result = 0, state1; 148 int cur_state;
152 u32 i = 0; 149 int i = 0;
153
154 150
155 if (!list || !state) 151 if (!list || !state)
156 return -EINVAL; 152 return -EINVAL;
@@ -158,25 +154,33 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
158 /* The state of the list is 'on' IFF all resources are 'on'. */ 154 /* The state of the list is 'on' IFF all resources are 'on'. */
159 155
160 for (i = 0; i < list->count; i++) { 156 for (i = 0; i < list->count; i++) {
161 /* 157 struct acpi_power_resource *resource;
162 * The state of the power resource can be obtained by 158 acpi_handle handle = list->handles[i];
163 * using the ACPI handle. In such case it is unnecessary to 159 int result;
164 * get the Power resource first and then get its state again. 160
165 */ 161 result = acpi_power_get_context(handle, &resource);
166 result = acpi_power_get_state(list->handles[i], &state1);
167 if (result) 162 if (result)
168 return result; 163 return result;
169 164
170 *state = state1; 165 mutex_lock(&resource->resource_lock);
171 166
172 if (*state != ACPI_POWER_RESOURCE_STATE_ON) 167 result = acpi_power_get_state(handle, &cur_state);
168
169 mutex_unlock(&resource->resource_lock);
170
171 if (result)
172 return result;
173
174 if (cur_state != ACPI_POWER_RESOURCE_STATE_ON)
173 break; 175 break;
174 } 176 }
175 177
176 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n", 178 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n",
177 *state ? "on" : "off")); 179 cur_state ? "on" : "off"));
178 180
179 return result; 181 *state = cur_state;
182
183 return 0;
180} 184}
181 185
182static int __acpi_power_on(struct acpi_power_resource *resource) 186static int __acpi_power_on(struct acpi_power_resource *resource)
@@ -222,7 +226,7 @@ static int acpi_power_on(acpi_handle handle)
222 return result; 226 return result;
223} 227}
224 228
225static int acpi_power_off_device(acpi_handle handle) 229static int acpi_power_off(acpi_handle handle)
226{ 230{
227 int result = 0; 231 int result = 0;
228 acpi_status status = AE_OK; 232 acpi_status status = AE_OK;
@@ -266,6 +270,35 @@ static int acpi_power_off_device(acpi_handle handle)
266 return result; 270 return result;
267} 271}
268 272
273static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res)
274{
275 int i;
276
277 for (i = num_res - 1; i >= 0 ; i--)
278 acpi_power_off(list->handles[i]);
279}
280
281static void acpi_power_off_list(struct acpi_handle_list *list)
282{
283 __acpi_power_off_list(list, list->count);
284}
285
286static int acpi_power_on_list(struct acpi_handle_list *list)
287{
288 int result = 0;
289 int i;
290
291 for (i = 0; i < list->count; i++) {
292 result = acpi_power_on(list->handles[i]);
293 if (result) {
294 __acpi_power_off_list(list, i);
295 break;
296 }
297 }
298
299 return result;
300}
301
269/** 302/**
270 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in 303 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
271 * ACPI 3.0) _PSW (Power State Wake) 304 * ACPI 3.0) _PSW (Power State Wake)
@@ -404,8 +437,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
404 437
405 /* Close power resource */ 438 /* Close power resource */
406 for (i = 0; i < dev->wakeup.resources.count; i++) { 439 for (i = 0; i < dev->wakeup.resources.count; i++) {
407 int ret = acpi_power_off_device( 440 int ret = acpi_power_off(dev->wakeup.resources.handles[i]);
408 dev->wakeup.resources.handles[i]);
409 if (ret) { 441 if (ret) {
410 printk(KERN_ERR PREFIX "Transition power state\n"); 442 printk(KERN_ERR PREFIX "Transition power state\n");
411 dev->wakeup.flags.valid = 0; 443 dev->wakeup.flags.valid = 0;
@@ -423,19 +455,16 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
423 Device Power Management 455 Device Power Management
424 -------------------------------------------------------------------------- */ 456 -------------------------------------------------------------------------- */
425 457
426int acpi_power_get_inferred_state(struct acpi_device *device) 458int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
427{ 459{
428 int result = 0; 460 int result = 0;
429 struct acpi_handle_list *list = NULL; 461 struct acpi_handle_list *list = NULL;
430 int list_state = 0; 462 int list_state = 0;
431 int i = 0; 463 int i = 0;
432 464
433 465 if (!device || !state)
434 if (!device)
435 return -EINVAL; 466 return -EINVAL;
436 467
437 device->power.state = ACPI_STATE_UNKNOWN;
438
439 /* 468 /*
440 * We know a device's inferred power state when all the resources 469 * We know a device's inferred power state when all the resources
441 * required for a given D-state are 'on'. 470 * required for a given D-state are 'on'.
@@ -450,22 +479,26 @@ int acpi_power_get_inferred_state(struct acpi_device *device)
450 return result; 479 return result;
451 480
452 if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { 481 if (list_state == ACPI_POWER_RESOURCE_STATE_ON) {
453 device->power.state = i; 482 *state = i;
454 return 0; 483 return 0;
455 } 484 }
456 } 485 }
457 486
458 device->power.state = ACPI_STATE_D3; 487 *state = ACPI_STATE_D3;
459
460 return 0; 488 return 0;
461} 489}
462 490
491int acpi_power_on_resources(struct acpi_device *device, int state)
492{
493 if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3)
494 return -EINVAL;
495
496 return acpi_power_on_list(&device->power.states[state].resources);
497}
498
463int acpi_power_transition(struct acpi_device *device, int state) 499int acpi_power_transition(struct acpi_device *device, int state)
464{ 500{
465 int result = 0; 501 int result;
466 struct acpi_handle_list *cl = NULL; /* Current Resources */
467 struct acpi_handle_list *tl = NULL; /* Target Resources */
468 int i = 0;
469 502
470 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) 503 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
471 return -EINVAL; 504 return -EINVAL;
@@ -477,37 +510,20 @@ int acpi_power_transition(struct acpi_device *device, int state)
477 || (device->power.state > ACPI_STATE_D3)) 510 || (device->power.state > ACPI_STATE_D3))
478 return -ENODEV; 511 return -ENODEV;
479 512
480 cl = &device->power.states[device->power.state].resources;
481 tl = &device->power.states[state].resources;
482
483 /* TBD: Resources must be ordered. */ 513 /* TBD: Resources must be ordered. */
484 514
485 /* 515 /*
486 * First we reference all power resources required in the target list 516 * First we reference all power resources required in the target list
487 * (e.g. so the device doesn't lose power while transitioning). 517 * (e.g. so the device doesn't lose power while transitioning). Then,
518 * we dereference all power resources used in the current list.
488 */ 519 */
489 for (i = 0; i < tl->count; i++) { 520 result = acpi_power_on_list(&device->power.states[state].resources);
490 result = acpi_power_on(tl->handles[i]); 521 if (!result)
491 if (result) 522 acpi_power_off_list(
492 goto end; 523 &device->power.states[device->power.state].resources);
493 }
494 524
495 /* 525 /* We shouldn't change the state unless the above operations succeed. */
496 * Then we dereference all power resources used in the current list. 526 device->power.state = result ? ACPI_STATE_UNKNOWN : state;
497 */
498 for (i = 0; i < cl->count; i++) {
499 result = acpi_power_off_device(cl->handles[i]);
500 if (result)
501 goto end;
502 }
503
504 end:
505 if (result)
506 device->power.state = ACPI_STATE_UNKNOWN;
507 else {
508 /* We shouldn't change the state till all above operations succeed */
509 device->power.state = state;
510 }
511 527
512 return result; 528 return result;
513} 529}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index afad67769db6..f5f986991b52 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,7 +311,9 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
311 dev->pnp.bus_id, 311 dev->pnp.bus_id,
312 (u32) dev->wakeup.sleep_state, 312 (u32) dev->wakeup.sleep_state,
313 dev->wakeup.flags.run_wake ? '*' : ' ', 313 dev->wakeup.flags.run_wake ? '*' : ' ',
314 dev->wakeup.state.enabled ? "enabled" : "disabled"); 314 (device_may_wakeup(&dev->dev)
315 || (ldev && device_may_wakeup(ldev))) ?
316 "enabled" : "disabled");
315 if (ldev) 317 if (ldev)
316 seq_printf(seq, "%s:%s", 318 seq_printf(seq, "%s:%s",
317 ldev->bus ? ldev->bus->name : "no-bus", 319 ldev->bus ? ldev->bus->name : "no-bus",
@@ -328,8 +330,10 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
328{ 330{
329 struct device *dev = acpi_get_physical_device(adev->handle); 331 struct device *dev = acpi_get_physical_device(adev->handle);
330 332
331 if (dev && device_can_wakeup(dev)) 333 if (dev && device_can_wakeup(dev)) {
332 device_set_wakeup_enable(dev, adev->wakeup.state.enabled); 334 bool enable = !device_may_wakeup(dev);
335 device_set_wakeup_enable(dev, enable);
336 }
333} 337}
334 338
335static ssize_t 339static ssize_t
@@ -341,7 +345,6 @@ acpi_system_write_wakeup_device(struct file *file,
341 char strbuf[5]; 345 char strbuf[5];
342 char str[5] = ""; 346 char str[5] = "";
343 unsigned int len = count; 347 unsigned int len = count;
344 struct acpi_device *found_dev = NULL;
345 348
346 if (len > 4) 349 if (len > 4)
347 len = 4; 350 len = 4;
@@ -361,33 +364,13 @@ acpi_system_write_wakeup_device(struct file *file,
361 continue; 364 continue;
362 365
363 if (!strncmp(dev->pnp.bus_id, str, 4)) { 366 if (!strncmp(dev->pnp.bus_id, str, 4)) {
364 dev->wakeup.state.enabled = 367 if (device_can_wakeup(&dev->dev)) {
365 dev->wakeup.state.enabled ? 0 : 1; 368 bool enable = !device_may_wakeup(&dev->dev);
366 found_dev = dev; 369 device_set_wakeup_enable(&dev->dev, enable);
367 break; 370 } else {
368 }
369 }
370 if (found_dev) {
371 physical_device_enable_wakeup(found_dev);
372 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
373 struct acpi_device *dev = container_of(node,
374 struct
375 acpi_device,
376 wakeup_list);
377
378 if ((dev != found_dev) &&
379 (dev->wakeup.gpe_number ==
380 found_dev->wakeup.gpe_number)
381 && (dev->wakeup.gpe_device ==
382 found_dev->wakeup.gpe_device)) {
383 printk(KERN_WARNING
384 "ACPI: '%s' and '%s' have the same GPE, "
385 "can't disable/enable one separately\n",
386 dev->pnp.bus_id, found_dev->pnp.bus_id);
387 dev->wakeup.state.enabled =
388 found_dev->wakeup.state.enabled;
389 physical_device_enable_wakeup(dev); 371 physical_device_enable_wakeup(dev);
390 } 372 }
373 break;
391 } 374 }
392 } 375 }
393 mutex_unlock(&acpi_device_lock); 376 mutex_unlock(&acpi_device_lock);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index bec561c14beb..3c1a2fec8cda 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -23,7 +23,7 @@ static int set_no_mwait(const struct dmi_system_id *id)
23{ 23{
24 printk(KERN_NOTICE PREFIX "%s detected - " 24 printk(KERN_NOTICE PREFIX "%s detected - "
25 "disabling mwait for CPU C-states\n", id->ident); 25 "disabling mwait for CPU C-states\n", id->ident);
26 idle_nomwait = 1; 26 boot_option_idle_override = IDLE_NOMWAIT;
27 return 0; 27 return 0;
28} 28}
29 29
@@ -283,7 +283,7 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
283{ 283{
284 acpi_status status = AE_OK; 284 acpi_status status = AE_OK;
285 285
286 if (idle_nomwait) { 286 if (boot_option_idle_override == IDLE_NOMWAIT) {
287 /* 287 /*
288 * If mwait is disabled for CPU C-states, the C2C3_FFH access 288 * If mwait is disabled for CPU C-states, the C2C3_FFH access
289 * mode will be disabled in the parameter of _PDC object. 289 * mode will be disabled in the parameter of _PDC object.
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 85e48047d7b0..360a74e6add0 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -40,10 +40,6 @@
40#include <linux/pm.h> 40#include <linux/pm.h>
41#include <linux/cpufreq.h> 41#include <linux/cpufreq.h>
42#include <linux/cpu.h> 42#include <linux/cpu.h>
43#ifdef CONFIG_ACPI_PROCFS
44#include <linux/proc_fs.h>
45#include <linux/seq_file.h>
46#endif
47#include <linux/dmi.h> 43#include <linux/dmi.h>
48#include <linux/moduleparam.h> 44#include <linux/moduleparam.h>
49#include <linux/cpuidle.h> 45#include <linux/cpuidle.h>
@@ -246,53 +242,6 @@ static int acpi_processor_errata(struct acpi_processor *pr)
246 return result; 242 return result;
247} 243}
248 244
249#ifdef CONFIG_ACPI_PROCFS
250static struct proc_dir_entry *acpi_processor_dir = NULL;
251
252static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
253{
254 struct proc_dir_entry *entry = NULL;
255
256
257 if (!acpi_device_dir(device)) {
258 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
259 acpi_processor_dir);
260 if (!acpi_device_dir(device))
261 return -ENODEV;
262 }
263
264 /* 'throttling' [R/W] */
265 entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
266 S_IFREG | S_IRUGO | S_IWUSR,
267 acpi_device_dir(device),
268 &acpi_processor_throttling_fops,
269 acpi_driver_data(device));
270 if (!entry)
271 return -EIO;
272 return 0;
273}
274static int acpi_processor_remove_fs(struct acpi_device *device)
275{
276
277 if (acpi_device_dir(device)) {
278 remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
279 acpi_device_dir(device));
280 remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
281 acpi_device_dir(device) = NULL;
282 }
283
284 return 0;
285}
286#else
287static inline int acpi_processor_add_fs(struct acpi_device *device)
288{
289 return 0;
290}
291static inline int acpi_processor_remove_fs(struct acpi_device *device)
292{
293 return 0;
294}
295#endif
296/* -------------------------------------------------------------------------- 245/* --------------------------------------------------------------------------
297 Driver Interface 246 Driver Interface
298 -------------------------------------------------------------------------- */ 247 -------------------------------------------------------------------------- */
@@ -478,8 +427,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
478 if (action == CPU_ONLINE && pr) { 427 if (action == CPU_ONLINE && pr) {
479 acpi_processor_ppc_has_changed(pr, 0); 428 acpi_processor_ppc_has_changed(pr, 0);
480 acpi_processor_cst_has_changed(pr); 429 acpi_processor_cst_has_changed(pr);
430 acpi_processor_reevaluate_tstate(pr, action);
481 acpi_processor_tstate_has_changed(pr); 431 acpi_processor_tstate_has_changed(pr);
482 } 432 }
433 if (action == CPU_DEAD && pr) {
434 /* invalidate the flag.throttling after one CPU is offline */
435 acpi_processor_reevaluate_tstate(pr, action);
436 }
483 return NOTIFY_OK; 437 return NOTIFY_OK;
484} 438}
485 439
@@ -537,14 +491,10 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
537 491
538 per_cpu(processors, pr->id) = pr; 492 per_cpu(processors, pr->id) = pr;
539 493
540 result = acpi_processor_add_fs(device);
541 if (result)
542 goto err_free_cpumask;
543
544 sysdev = get_cpu_sysdev(pr->id); 494 sysdev = get_cpu_sysdev(pr->id);
545 if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { 495 if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
546 result = -EFAULT; 496 result = -EFAULT;
547 goto err_remove_fs; 497 goto err_free_cpumask;
548 } 498 }
549 499
550#ifdef CONFIG_CPU_FREQ 500#ifdef CONFIG_CPU_FREQ
@@ -590,8 +540,6 @@ err_thermal_unregister:
590 thermal_cooling_device_unregister(pr->cdev); 540 thermal_cooling_device_unregister(pr->cdev);
591err_power_exit: 541err_power_exit:
592 acpi_processor_power_exit(pr, device); 542 acpi_processor_power_exit(pr, device);
593err_remove_fs:
594 acpi_processor_remove_fs(device);
595err_free_cpumask: 543err_free_cpumask:
596 free_cpumask_var(pr->throttling.shared_cpu_map); 544 free_cpumask_var(pr->throttling.shared_cpu_map);
597 545
@@ -620,8 +568,6 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
620 568
621 sysfs_remove_link(&device->dev.kobj, "sysdev"); 569 sysfs_remove_link(&device->dev.kobj, "sysdev");
622 570
623 acpi_processor_remove_fs(device);
624
625 if (pr->cdev) { 571 if (pr->cdev) {
626 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 572 sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
627 sysfs_remove_link(&pr->cdev->device.kobj, "device"); 573 sysfs_remove_link(&pr->cdev->device.kobj, "device");
@@ -854,12 +800,6 @@ static int __init acpi_processor_init(void)
854 800
855 memset(&errata, 0, sizeof(errata)); 801 memset(&errata, 0, sizeof(errata));
856 802
857#ifdef CONFIG_ACPI_PROCFS
858 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
859 if (!acpi_processor_dir)
860 return -ENOMEM;
861#endif
862
863 if (!cpuidle_register_driver(&acpi_idle_driver)) { 803 if (!cpuidle_register_driver(&acpi_idle_driver)) {
864 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", 804 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
865 acpi_idle_driver.name); 805 acpi_idle_driver.name);
@@ -885,10 +825,6 @@ static int __init acpi_processor_init(void)
885out_cpuidle: 825out_cpuidle:
886 cpuidle_unregister_driver(&acpi_idle_driver); 826 cpuidle_unregister_driver(&acpi_idle_driver);
887 827
888#ifdef CONFIG_ACPI_PROCFS
889 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
890#endif
891
892 return result; 828 return result;
893} 829}
894 830
@@ -907,10 +843,6 @@ static void __exit acpi_processor_exit(void)
907 843
908 cpuidle_unregister_driver(&acpi_idle_driver); 844 cpuidle_unregister_driver(&acpi_idle_driver);
909 845
910#ifdef CONFIG_ACPI_PROCFS
911 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
912#endif
913
914 return; 846 return;
915} 847}
916 848
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index a765b823aa9e..d615b7d69bca 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -79,6 +79,13 @@ module_param(bm_check_disable, uint, 0000);
79static unsigned int latency_factor __read_mostly = 2; 79static unsigned int latency_factor __read_mostly = 2;
80module_param(latency_factor, uint, 0644); 80module_param(latency_factor, uint, 0644);
81 81
82static int disabled_by_idle_boot_param(void)
83{
84 return boot_option_idle_override == IDLE_POLL ||
85 boot_option_idle_override == IDLE_FORCE_MWAIT ||
86 boot_option_idle_override == IDLE_HALT;
87}
88
82/* 89/*
83 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 90 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
84 * For now disable this. Probably a bug somewhere else. 91 * For now disable this. Probably a bug somewhere else.
@@ -455,7 +462,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
455 continue; 462 continue;
456 } 463 }
457 if (cx.type == ACPI_STATE_C1 && 464 if (cx.type == ACPI_STATE_C1 &&
458 (idle_halt || idle_nomwait)) { 465 (boot_option_idle_override == IDLE_NOMWAIT)) {
459 /* 466 /*
460 * In most cases the C1 space_id obtained from 467 * In most cases the C1 space_id obtained from
461 * _CST object is FIXED_HARDWARE access mode. 468 * _CST object is FIXED_HARDWARE access mode.
@@ -1016,7 +1023,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1016 state->flags = 0; 1023 state->flags = 0;
1017 switch (cx->type) { 1024 switch (cx->type) {
1018 case ACPI_STATE_C1: 1025 case ACPI_STATE_C1:
1019 state->flags |= CPUIDLE_FLAG_SHALLOW;
1020 if (cx->entry_method == ACPI_CSTATE_FFH) 1026 if (cx->entry_method == ACPI_CSTATE_FFH)
1021 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1027 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1022 1028
@@ -1025,16 +1031,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1025 break; 1031 break;
1026 1032
1027 case ACPI_STATE_C2: 1033 case ACPI_STATE_C2:
1028 state->flags |= CPUIDLE_FLAG_BALANCED;
1029 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1034 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1030 state->enter = acpi_idle_enter_simple; 1035 state->enter = acpi_idle_enter_simple;
1031 dev->safe_state = state; 1036 dev->safe_state = state;
1032 break; 1037 break;
1033 1038
1034 case ACPI_STATE_C3: 1039 case ACPI_STATE_C3:
1035 state->flags |= CPUIDLE_FLAG_DEEP;
1036 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1040 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1037 state->flags |= CPUIDLE_FLAG_CHECK_BM;
1038 state->enter = pr->flags.bm_check ? 1041 state->enter = pr->flags.bm_check ?
1039 acpi_idle_enter_bm : 1042 acpi_idle_enter_bm :
1040 acpi_idle_enter_simple; 1043 acpi_idle_enter_simple;
@@ -1058,7 +1061,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1058{ 1061{
1059 int ret = 0; 1062 int ret = 0;
1060 1063
1061 if (boot_option_idle_override) 1064 if (disabled_by_idle_boot_param())
1062 return 0; 1065 return 0;
1063 1066
1064 if (!pr) 1067 if (!pr)
@@ -1089,19 +1092,10 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1089 acpi_status status = 0; 1092 acpi_status status = 0;
1090 static int first_run; 1093 static int first_run;
1091 1094
1092 if (boot_option_idle_override) 1095 if (disabled_by_idle_boot_param())
1093 return 0; 1096 return 0;
1094 1097
1095 if (!first_run) { 1098 if (!first_run) {
1096 if (idle_halt) {
1097 /*
1098 * When the boot option of "idle=halt" is added, halt
1099 * is used for CPU IDLE.
1100 * In such case C2/C3 is meaningless. So the max_cstate
1101 * is set to one.
1102 */
1103 max_cstate = 1;
1104 }
1105 dmi_check_system(processor_power_dmi_table); 1099 dmi_check_system(processor_power_dmi_table);
1106 max_cstate = acpi_processor_cstate_check(max_cstate); 1100 max_cstate = acpi_processor_cstate_check(max_cstate);
1107 if (max_cstate < ACPI_C_STATES_MAX) 1101 if (max_cstate < ACPI_C_STATES_MAX)
@@ -1142,7 +1136,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1142int acpi_processor_power_exit(struct acpi_processor *pr, 1136int acpi_processor_power_exit(struct acpi_processor *pr,
1143 struct acpi_device *device) 1137 struct acpi_device *device)
1144{ 1138{
1145 if (boot_option_idle_override) 1139 if (disabled_by_idle_boot_param())
1146 return 0; 1140 return 0;
1147 1141
1148 cpuidle_unregister_device(&pr->power.dev); 1142 cpuidle_unregister_device(&pr->power.dev);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ff3632717c51..fa84e9744330 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -32,10 +32,6 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/cpufreq.h> 34#include <linux/cpufreq.h>
35#ifdef CONFIG_ACPI_PROCFS
36#include <linux/proc_fs.h>
37#include <linux/seq_file.h>
38#endif
39 35
40#include <asm/io.h> 36#include <asm/io.h>
41#include <asm/uaccess.h> 37#include <asm/uaccess.h>
@@ -370,6 +366,58 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
370} 366}
371 367
372/* 368/*
369 * This function is used to reevaluate whether the T-state is valid
370 * after one CPU is onlined/offlined.
371 * It is noted that it won't reevaluate the following properties for
372 * the T-state.
373 * 1. Control method.
374 * 2. the number of supported T-state
375 * 3. TSD domain
376 */
377void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
378 unsigned long action)
379{
380 int result = 0;
381
382 if (action == CPU_DEAD) {
383 /* When one CPU is offline, the T-state throttling
384 * will be invalidated.
385 */
386 pr->flags.throttling = 0;
387 return;
388 }
389 /* the following is to recheck whether the T-state is valid for
390 * the online CPU
391 */
392 if (!pr->throttling.state_count) {
393 /* If the number of T-state is invalid, it is
394 * invalidated.
395 */
396 pr->flags.throttling = 0;
397 return;
398 }
399 pr->flags.throttling = 1;
400
401 /* Disable throttling (if enabled). We'll let subsequent
402 * policy (e.g.thermal) decide to lower performance if it
403 * so chooses, but for now we'll crank up the speed.
404 */
405
406 result = acpi_processor_get_throttling(pr);
407 if (result)
408 goto end;
409
410 if (pr->throttling.state) {
411 result = acpi_processor_set_throttling(pr, 0, false);
412 if (result)
413 goto end;
414 }
415
416end:
417 if (result)
418 pr->flags.throttling = 0;
419}
420/*
373 * _PTC - Processor Throttling Control (and status) register location 421 * _PTC - Processor Throttling Control (and status) register location
374 */ 422 */
375static int acpi_processor_get_throttling_control(struct acpi_processor *pr) 423static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
@@ -876,7 +924,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
876 */ 924 */
877 cpumask_copy(saved_mask, &current->cpus_allowed); 925 cpumask_copy(saved_mask, &current->cpus_allowed);
878 /* FIXME: use work_on_cpu() */ 926 /* FIXME: use work_on_cpu() */
879 set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 927 if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
928 /* Can't migrate to the target pr->id CPU. Exit */
929 free_cpumask_var(saved_mask);
930 return -ENODEV;
931 }
880 ret = pr->throttling.acpi_processor_get_throttling(pr); 932 ret = pr->throttling.acpi_processor_get_throttling(pr);
881 /* restore the previous state */ 933 /* restore the previous state */
882 set_cpus_allowed_ptr(current, saved_mask); 934 set_cpus_allowed_ptr(current, saved_mask);
@@ -1051,6 +1103,14 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
1051 return -ENOMEM; 1103 return -ENOMEM;
1052 } 1104 }
1053 1105
1106 if (cpu_is_offline(pr->id)) {
1107 /*
1108 * the cpu pointed by pr->id is offline. Unnecessary to change
1109 * the throttling state any more.
1110 */
1111 return -ENODEV;
1112 }
1113
1054 cpumask_copy(saved_mask, &current->cpus_allowed); 1114 cpumask_copy(saved_mask, &current->cpus_allowed);
1055 t_state.target_state = state; 1115 t_state.target_state = state;
1056 p_throttling = &(pr->throttling); 1116 p_throttling = &(pr->throttling);
@@ -1074,7 +1134,11 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
1074 */ 1134 */
1075 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1135 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1076 /* FIXME: use work_on_cpu() */ 1136 /* FIXME: use work_on_cpu() */
1077 set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 1137 if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
1138 /* Can't migrate to the pr->id CPU. Exit */
1139 ret = -ENODEV;
1140 goto exit;
1141 }
1078 ret = p_throttling->acpi_processor_set_throttling(pr, 1142 ret = p_throttling->acpi_processor_set_throttling(pr,
1079 t_state.target_state, force); 1143 t_state.target_state, force);
1080 } else { 1144 } else {
@@ -1106,7 +1170,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
1106 } 1170 }
1107 t_state.cpu = i; 1171 t_state.cpu = i;
1108 /* FIXME: use work_on_cpu() */ 1172 /* FIXME: use work_on_cpu() */
1109 set_cpus_allowed_ptr(current, cpumask_of(i)); 1173 if (set_cpus_allowed_ptr(current, cpumask_of(i)))
1174 continue;
1110 ret = match_pr->throttling. 1175 ret = match_pr->throttling.
1111 acpi_processor_set_throttling( 1176 acpi_processor_set_throttling(
1112 match_pr, t_state.target_state, force); 1177 match_pr, t_state.target_state, force);
@@ -1126,6 +1191,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
1126 /* restore the previous state */ 1191 /* restore the previous state */
1127 /* FIXME: use work_on_cpu() */ 1192 /* FIXME: use work_on_cpu() */
1128 set_cpus_allowed_ptr(current, saved_mask); 1193 set_cpus_allowed_ptr(current, saved_mask);
1194exit:
1129 free_cpumask_var(online_throttling_cpus); 1195 free_cpumask_var(online_throttling_cpus);
1130 free_cpumask_var(saved_mask); 1196 free_cpumask_var(saved_mask);
1131 return ret; 1197 return ret;
@@ -1216,113 +1282,3 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1216 return result; 1282 return result;
1217} 1283}
1218 1284
1219#ifdef CONFIG_ACPI_PROCFS
1220/* proc interface */
1221static int acpi_processor_throttling_seq_show(struct seq_file *seq,
1222 void *offset)
1223{
1224 struct acpi_processor *pr = seq->private;
1225 int i = 0;
1226 int result = 0;
1227
1228 if (!pr)
1229 goto end;
1230
1231 if (!(pr->throttling.state_count > 0)) {
1232 seq_puts(seq, "<not supported>\n");
1233 goto end;
1234 }
1235
1236 result = acpi_processor_get_throttling(pr);
1237
1238 if (result) {
1239 seq_puts(seq,
1240 "Could not determine current throttling state.\n");
1241 goto end;
1242 }
1243
1244 seq_printf(seq, "state count: %d\n"
1245 "active state: T%d\n"
1246 "state available: T%d to T%d\n",
1247 pr->throttling.state_count, pr->throttling.state,
1248 pr->throttling_platform_limit,
1249 pr->throttling.state_count - 1);
1250
1251 seq_puts(seq, "states:\n");
1252 if (pr->throttling.acpi_processor_get_throttling ==
1253 acpi_processor_get_throttling_fadt) {
1254 for (i = 0; i < pr->throttling.state_count; i++)
1255 seq_printf(seq, " %cT%d: %02d%%\n",
1256 (i == pr->throttling.state ? '*' : ' '), i,
1257 (pr->throttling.states[i].performance ? pr->
1258 throttling.states[i].performance / 10 : 0));
1259 } else {
1260 for (i = 0; i < pr->throttling.state_count; i++)
1261 seq_printf(seq, " %cT%d: %02d%%\n",
1262 (i == pr->throttling.state ? '*' : ' '), i,
1263 (int)pr->throttling.states_tss[i].
1264 freqpercentage);
1265 }
1266
1267 end:
1268 return 0;
1269}
1270
1271static int acpi_processor_throttling_open_fs(struct inode *inode,
1272 struct file *file)
1273{
1274 return single_open(file, acpi_processor_throttling_seq_show,
1275 PDE(inode)->data);
1276}
1277
1278static ssize_t acpi_processor_write_throttling(struct file *file,
1279 const char __user * buffer,
1280 size_t count, loff_t * data)
1281{
1282 int result = 0;
1283 struct seq_file *m = file->private_data;
1284 struct acpi_processor *pr = m->private;
1285 char state_string[5] = "";
1286 char *charp = NULL;
1287 size_t state_val = 0;
1288 char tmpbuf[5] = "";
1289
1290 if (!pr || (count > sizeof(state_string) - 1))
1291 return -EINVAL;
1292
1293 if (copy_from_user(state_string, buffer, count))
1294 return -EFAULT;
1295
1296 state_string[count] = '\0';
1297 if ((count > 0) && (state_string[count-1] == '\n'))
1298 state_string[count-1] = '\0';
1299
1300 charp = state_string;
1301 if ((state_string[0] == 't') || (state_string[0] == 'T'))
1302 charp++;
1303
1304 state_val = simple_strtoul(charp, NULL, 0);
1305 if (state_val >= pr->throttling.state_count)
1306 return -EINVAL;
1307
1308 snprintf(tmpbuf, 5, "%zu", state_val);
1309
1310 if (strcmp(tmpbuf, charp) != 0)
1311 return -EINVAL;
1312
1313 result = acpi_processor_set_throttling(pr, state_val, false);
1314 if (result)
1315 return result;
1316
1317 return count;
1318}
1319
1320const struct file_operations acpi_processor_throttling_fops = {
1321 .owner = THIS_MODULE,
1322 .open = acpi_processor_throttling_open_fs,
1323 .read = seq_read,
1324 .write = acpi_processor_write_throttling,
1325 .llseek = seq_lseek,
1326 .release = single_release,
1327};
1328#endif
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index e5dbedb16bbf..51ae3794ec7f 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -484,6 +484,8 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
484 const struct file_operations *state_fops, 484 const struct file_operations *state_fops,
485 const struct file_operations *alarm_fops, void *data) 485 const struct file_operations *alarm_fops, void *data)
486{ 486{
487 printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
488 " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
487 if (!*dir) { 489 if (!*dir) {
488 *dir = proc_mkdir(dir_name, parent_dir); 490 *dir = proc_mkdir(dir_name, parent_dir);
489 if (!*dir) { 491 if (!*dir) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 29ef505c487b..b99e62494607 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -778,7 +778,7 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
778 wakeup->resources.handles[i] = element->reference.handle; 778 wakeup->resources.handles[i] = element->reference.handle;
779 } 779 }
780 780
781 acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number); 781 acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
782 782
783 out: 783 out:
784 kfree(buffer.pointer); 784 kfree(buffer.pointer);
@@ -803,7 +803,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
803 /* Power button, Lid switch always enable wakeup */ 803 /* Power button, Lid switch always enable wakeup */
804 if (!acpi_match_device_ids(device, button_device_ids)) { 804 if (!acpi_match_device_ids(device, button_device_ids)) {
805 device->wakeup.flags.run_wake = 1; 805 device->wakeup.flags.run_wake = 1;
806 device->wakeup.flags.always_enabled = 1; 806 device_set_wakeup_capable(&device->dev, true);
807 return; 807 return;
808 } 808 }
809 809
@@ -815,16 +815,22 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
815 !!(event_status & ACPI_EVENT_FLAG_HANDLE); 815 !!(event_status & ACPI_EVENT_FLAG_HANDLE);
816} 816}
817 817
818static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 818static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
819{ 819{
820 acpi_handle temp;
820 acpi_status status = 0; 821 acpi_status status = 0;
821 int psw_error; 822 int psw_error;
822 823
824 /* Presence of _PRW indicates wake capable */
825 status = acpi_get_handle(device->handle, "_PRW", &temp);
826 if (ACPI_FAILURE(status))
827 return;
828
823 status = acpi_bus_extract_wakeup_device_power_package(device->handle, 829 status = acpi_bus_extract_wakeup_device_power_package(device->handle,
824 &device->wakeup); 830 &device->wakeup);
825 if (ACPI_FAILURE(status)) { 831 if (ACPI_FAILURE(status)) {
826 ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); 832 ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
827 goto end; 833 return;
828 } 834 }
829 835
830 device->wakeup.flags.valid = 1; 836 device->wakeup.flags.valid = 1;
@@ -840,13 +846,10 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
840 if (psw_error) 846 if (psw_error)
841 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 847 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
842 "error in _DSW or _PSW evaluation\n")); 848 "error in _DSW or _PSW evaluation\n"));
843
844end:
845 if (ACPI_FAILURE(status))
846 device->flags.wake_capable = 0;
847 return 0;
848} 849}
849 850
851static void acpi_bus_add_power_resource(acpi_handle handle);
852
850static int acpi_bus_get_power_flags(struct acpi_device *device) 853static int acpi_bus_get_power_flags(struct acpi_device *device)
851{ 854{
852 acpi_status status = 0; 855 acpi_status status = 0;
@@ -875,8 +878,12 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
875 acpi_evaluate_reference(device->handle, object_name, NULL, 878 acpi_evaluate_reference(device->handle, object_name, NULL,
876 &ps->resources); 879 &ps->resources);
877 if (ps->resources.count) { 880 if (ps->resources.count) {
881 int j;
882
878 device->power.flags.power_resources = 1; 883 device->power.flags.power_resources = 1;
879 ps->flags.valid = 1; 884 ps->flags.valid = 1;
885 for (j = 0; j < ps->resources.count; j++)
886 acpi_bus_add_power_resource(ps->resources.handles[j]);
880 } 887 }
881 888
882 /* Evaluate "_PSx" to see if we can do explicit sets */ 889 /* Evaluate "_PSx" to see if we can do explicit sets */
@@ -901,10 +908,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
901 device->power.states[ACPI_STATE_D3].flags.valid = 1; 908 device->power.states[ACPI_STATE_D3].flags.valid = 1;
902 device->power.states[ACPI_STATE_D3].power = 0; 909 device->power.states[ACPI_STATE_D3].power = 0;
903 910
904 /* TBD: System wake support and resource requirements. */ 911 acpi_bus_init_power(device);
905
906 device->power.state = ACPI_STATE_UNKNOWN;
907 acpi_bus_get_power(device->handle, &(device->power.state));
908 912
909 return 0; 913 return 0;
910} 914}
@@ -947,11 +951,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
947 if (ACPI_SUCCESS(status)) 951 if (ACPI_SUCCESS(status))
948 device->flags.power_manageable = 1; 952 device->flags.power_manageable = 1;
949 953
950 /* Presence of _PRW indicates wake capable */
951 status = acpi_get_handle(device->handle, "_PRW", &temp);
952 if (ACPI_SUCCESS(status))
953 device->flags.wake_capable = 1;
954
955 /* TBD: Performance management */ 954 /* TBD: Performance management */
956 955
957 return 0; 956 return 0;
@@ -1278,11 +1277,7 @@ static int acpi_add_single_object(struct acpi_device **child,
1278 * Wakeup device management 1277 * Wakeup device management
1279 *----------------------- 1278 *-----------------------
1280 */ 1279 */
1281 if (device->flags.wake_capable) { 1280 acpi_bus_get_wakeup_device_flags(device);
1282 result = acpi_bus_get_wakeup_device_flags(device);
1283 if (result)
1284 goto end;
1285 }
1286 1281
1287 /* 1282 /*
1288 * Performance Management 1283 * Performance Management
@@ -1326,6 +1321,20 @@ end:
1326#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ 1321#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
1327 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) 1322 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
1328 1323
1324static void acpi_bus_add_power_resource(acpi_handle handle)
1325{
1326 struct acpi_bus_ops ops = {
1327 .acpi_op_add = 1,
1328 .acpi_op_start = 1,
1329 };
1330 struct acpi_device *device = NULL;
1331
1332 acpi_bus_get_device(handle, &device);
1333 if (!device)
1334 acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER,
1335 ACPI_STA_DEFAULT, &ops);
1336}
1337
1329static int acpi_bus_type_and_status(acpi_handle handle, int *type, 1338static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1330 unsigned long long *sta) 1339 unsigned long long *sta)
1331{ 1340{
@@ -1371,7 +1380,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
1371 struct acpi_bus_ops *ops = context; 1380 struct acpi_bus_ops *ops = context;
1372 int type; 1381 int type;
1373 unsigned long long sta; 1382 unsigned long long sta;
1374 struct acpi_device_wakeup wakeup;
1375 struct acpi_device *device; 1383 struct acpi_device *device;
1376 acpi_status status; 1384 acpi_status status;
1377 int result; 1385 int result;
@@ -1382,7 +1390,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
1382 1390
1383 if (!(sta & ACPI_STA_DEVICE_PRESENT) && 1391 if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
1384 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { 1392 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
1385 acpi_bus_extract_wakeup_device_power_package(handle, &wakeup); 1393 struct acpi_device_wakeup wakeup;
1394 acpi_handle temp;
1395
1396 status = acpi_get_handle(handle, "_PRW", &temp);
1397 if (ACPI_SUCCESS(status))
1398 acpi_bus_extract_wakeup_device_power_package(handle,
1399 &wakeup);
1386 return AE_CTRL_DEPTH; 1400 return AE_CTRL_DEPTH;
1387 } 1401 }
1388 1402
@@ -1467,7 +1481,7 @@ int acpi_bus_start(struct acpi_device *device)
1467 1481
1468 result = acpi_bus_scan(device->handle, &ops, NULL); 1482 result = acpi_bus_scan(device->handle, &ops, NULL);
1469 1483
1470 acpi_update_gpes(); 1484 acpi_update_all_gpes();
1471 1485
1472 return result; 1486 return result;
1473} 1487}
@@ -1573,6 +1587,8 @@ int __init acpi_scan_init(void)
1573 printk(KERN_ERR PREFIX "Could not register bus type\n"); 1587 printk(KERN_ERR PREFIX "Could not register bus type\n");
1574 } 1588 }
1575 1589
1590 acpi_power_init();
1591
1576 /* 1592 /*
1577 * Enumerate devices in the ACPI namespace. 1593 * Enumerate devices in the ACPI namespace.
1578 */ 1594 */
@@ -1584,7 +1600,7 @@ int __init acpi_scan_init(void)
1584 if (result) 1600 if (result)
1585 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1601 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
1586 else 1602 else
1587 acpi_update_gpes(); 1603 acpi_update_all_gpes();
1588 1604
1589 return result; 1605 return result;
1590} 1606}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index c423231b952b..d6a8cd14de2e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -124,8 +124,7 @@ static int acpi_pm_freeze(void)
124static int acpi_pm_pre_suspend(void) 124static int acpi_pm_pre_suspend(void)
125{ 125{
126 acpi_pm_freeze(); 126 acpi_pm_freeze();
127 suspend_nvs_save(); 127 return suspend_nvs_save();
128 return 0;
129} 128}
130 129
131/** 130/**
@@ -151,7 +150,7 @@ static int acpi_pm_prepare(void)
151{ 150{
152 int error = __acpi_pm_prepare(); 151 int error = __acpi_pm_prepare();
153 if (!error) 152 if (!error)
154 acpi_pm_pre_suspend(); 153 error = acpi_pm_pre_suspend();
155 154
156 return error; 155 return error;
157} 156}
@@ -167,6 +166,7 @@ static void acpi_pm_finish(void)
167 u32 acpi_state = acpi_target_sleep_state; 166 u32 acpi_state = acpi_target_sleep_state;
168 167
169 acpi_ec_unblock_transactions(); 168 acpi_ec_unblock_transactions();
169 suspend_nvs_free();
170 170
171 if (acpi_state == ACPI_STATE_S0) 171 if (acpi_state == ACPI_STATE_S0)
172 return; 172 return;
@@ -187,7 +187,6 @@ static void acpi_pm_finish(void)
187 */ 187 */
188static void acpi_pm_end(void) 188static void acpi_pm_end(void)
189{ 189{
190 suspend_nvs_free();
191 /* 190 /*
192 * This is necessary in case acpi_pm_finish() is not called during a 191 * This is necessary in case acpi_pm_finish() is not called during a
193 * failing transition to a sleep state. 192 * failing transition to a sleep state.
@@ -435,6 +434,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
435 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), 434 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
436 }, 435 },
437 }, 436 },
437 {
438 .callback = init_nvs_nosave,
439 .ident = "Averatec AV1020-ED2",
440 .matches = {
441 DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
442 DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
443 },
444 },
438 {}, 445 {},
439}; 446};
440#endif /* CONFIG_SUSPEND */ 447#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index f8588f81048a..61891e75583d 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -438,7 +438,7 @@ static void delete_gpe_attr_array(void)
438 return; 438 return;
439} 439}
440 440
441void acpi_os_gpe_count(u32 gpe_number) 441static void gpe_count(u32 gpe_number)
442{ 442{
443 acpi_gpe_count++; 443 acpi_gpe_count++;
444 444
@@ -454,7 +454,7 @@ void acpi_os_gpe_count(u32 gpe_number)
454 return; 454 return;
455} 455}
456 456
457void acpi_os_fixed_event_count(u32 event_number) 457static void fixed_event_count(u32 event_number)
458{ 458{
459 if (!all_counters) 459 if (!all_counters)
460 return; 460 return;
@@ -468,6 +468,16 @@ void acpi_os_fixed_event_count(u32 event_number)
468 return; 468 return;
469} 469}
470 470
471static void acpi_gbl_event_handler(u32 event_type, acpi_handle device,
472 u32 event_number, void *context)
473{
474 if (event_type == ACPI_EVENT_TYPE_GPE)
475 gpe_count(event_number);
476
477 if (event_type == ACPI_EVENT_TYPE_FIXED)
478 fixed_event_count(event_number);
479}
480
471static int get_status(u32 index, acpi_event_status *status, 481static int get_status(u32 index, acpi_event_status *status,
472 acpi_handle *handle) 482 acpi_handle *handle)
473{ 483{
@@ -601,6 +611,7 @@ end:
601 611
602void acpi_irq_stats_init(void) 612void acpi_irq_stats_init(void)
603{ 613{
614 acpi_status status;
604 int i; 615 int i;
605 616
606 if (all_counters) 617 if (all_counters)
@@ -619,6 +630,10 @@ void acpi_irq_stats_init(void)
619 if (all_counters == NULL) 630 if (all_counters == NULL)
620 goto fail; 631 goto fail;
621 632
633 status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL);
634 if (ACPI_FAILURE(status))
635 goto fail;
636
622 counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), 637 counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
623 GFP_KERNEL); 638 GFP_KERNEL);
624 if (counter_attrs == NULL) 639 if (counter_attrs == NULL)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5a27b0a31315..2607e17b520f 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1059,8 +1059,9 @@ static int acpi_thermal_resume(struct acpi_device *device)
1059 break; 1059 break;
1060 tz->trips.active[i].flags.enabled = 1; 1060 tz->trips.active[i].flags.enabled = 1;
1061 for (j = 0; j < tz->trips.active[i].devices.count; j++) { 1061 for (j = 0; j < tz->trips.active[i].devices.count; j++) {
1062 result = acpi_bus_get_power(tz->trips.active[i].devices. 1062 result = acpi_bus_update_power(
1063 handles[j], &power_state); 1063 tz->trips.active[i].devices.handles[j],
1064 &power_state);
1064 if (result || (power_state != ACPI_STATE_D0)) { 1065 if (result || (power_state != ACPI_STATE_D0)) {
1065 tz->trips.active[i].flags.enabled = 0; 1066 tz->trips.active[i].flags.enabled = 0;
1066 break; 1067 break;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 15a0fde4b32a..90f8f7676d1f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -33,7 +33,6 @@
33#include <linux/input.h> 33#include <linux/input.h>
34#include <linux/backlight.h> 34#include <linux/backlight.h>
35#include <linux/thermal.h> 35#include <linux/thermal.h>
36#include <linux/video_output.h>
37#include <linux/sort.h> 36#include <linux/sort.h>
38#include <linux/pci.h> 37#include <linux/pci.h>
39#include <linux/pci_ids.h> 38#include <linux/pci_ids.h>
@@ -81,6 +80,13 @@ module_param(brightness_switch_enabled, bool, 0644);
81static int allow_duplicates; 80static int allow_duplicates;
82module_param(allow_duplicates, bool, 0644); 81module_param(allow_duplicates, bool, 0644);
83 82
83/*
84 * Some BIOSes claim they use minimum backlight at boot,
85 * and this may bring dimming screen after boot
86 */
87static int use_bios_initial_backlight = 1;
88module_param(use_bios_initial_backlight, bool, 0644);
89
84static int register_count = 0; 90static int register_count = 0;
85static int acpi_video_bus_add(struct acpi_device *device); 91static int acpi_video_bus_add(struct acpi_device *device);
86static int acpi_video_bus_remove(struct acpi_device *device, int type); 92static int acpi_video_bus_remove(struct acpi_device *device, int type);
@@ -172,9 +178,6 @@ struct acpi_video_device_cap {
172 u8 _BQC:1; /* Get current brightness level */ 178 u8 _BQC:1; /* Get current brightness level */
173 u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */ 179 u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */
174 u8 _DDC:1; /*Return the EDID for this device */ 180 u8 _DDC:1; /*Return the EDID for this device */
175 u8 _DCS:1; /*Return status of output device */
176 u8 _DGS:1; /*Query graphics state */
177 u8 _DSS:1; /*Device state set */
178}; 181};
179 182
180struct acpi_video_brightness_flags { 183struct acpi_video_brightness_flags {
@@ -202,7 +205,6 @@ struct acpi_video_device {
202 struct acpi_video_device_brightness *brightness; 205 struct acpi_video_device_brightness *brightness;
203 struct backlight_device *backlight; 206 struct backlight_device *backlight;
204 struct thermal_cooling_device *cooling_dev; 207 struct thermal_cooling_device *cooling_dev;
205 struct output_device *output_dev;
206}; 208};
207 209
208static const char device_decode[][30] = { 210static const char device_decode[][30] = {
@@ -226,10 +228,6 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
226 u32 level_current, u32 event); 228 u32 level_current, u32 event);
227static int acpi_video_switch_brightness(struct acpi_video_device *device, 229static int acpi_video_switch_brightness(struct acpi_video_device *device,
228 int event); 230 int event);
229static int acpi_video_device_get_state(struct acpi_video_device *device,
230 unsigned long long *state);
231static int acpi_video_output_get(struct output_device *od);
232static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
233 231
234/*backlight device sysfs support*/ 232/*backlight device sysfs support*/
235static int acpi_video_get_brightness(struct backlight_device *bd) 233static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -265,30 +263,6 @@ static const struct backlight_ops acpi_backlight_ops = {
265 .update_status = acpi_video_set_brightness, 263 .update_status = acpi_video_set_brightness,
266}; 264};
267 265
268/*video output device sysfs support*/
269static int acpi_video_output_get(struct output_device *od)
270{
271 unsigned long long state;
272 struct acpi_video_device *vd =
273 (struct acpi_video_device *)dev_get_drvdata(&od->dev);
274 acpi_video_device_get_state(vd, &state);
275 return (int)state;
276}
277
278static int acpi_video_output_set(struct output_device *od)
279{
280 unsigned long state = od->request_state;
281 struct acpi_video_device *vd=
282 (struct acpi_video_device *)dev_get_drvdata(&od->dev);
283 return acpi_video_device_set_state(vd, state);
284}
285
286static struct output_properties acpi_output_properties = {
287 .set_state = acpi_video_output_set,
288 .get_status = acpi_video_output_get,
289};
290
291
292/* thermal cooling device callbacks */ 266/* thermal cooling device callbacks */
293static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned 267static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned
294 long *state) 268 long *state)
@@ -344,34 +318,6 @@ static struct thermal_cooling_device_ops video_cooling_ops = {
344 Video Management 318 Video Management
345 -------------------------------------------------------------------------- */ 319 -------------------------------------------------------------------------- */
346 320
347/* device */
348
349static int
350acpi_video_device_get_state(struct acpi_video_device *device,
351 unsigned long long *state)
352{
353 int status;
354
355 status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state);
356
357 return status;
358}
359
360static int
361acpi_video_device_set_state(struct acpi_video_device *device, int state)
362{
363 int status;
364 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
365 struct acpi_object_list args = { 1, &arg0 };
366 unsigned long long ret;
367
368
369 arg0.integer.value = state;
370 status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret);
371
372 return status;
373}
374
375static int 321static int
376acpi_video_device_lcd_query_levels(struct acpi_video_device *device, 322acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
377 union acpi_object **levels) 323 union acpi_object **levels)
@@ -766,9 +712,11 @@ acpi_video_init_brightness(struct acpi_video_device *device)
766 * when invoked for the first time, i.e. level_old is invalid. 712 * when invoked for the first time, i.e. level_old is invalid.
767 * set the backlight to max_level in this case 713 * set the backlight to max_level in this case
768 */ 714 */
769 for (i = 2; i < br->count; i++) 715 if (use_bios_initial_backlight) {
770 if (level_old == br->levels[i]) 716 for (i = 2; i < br->count; i++)
771 level = level_old; 717 if (level_old == br->levels[i])
718 level = level_old;
719 }
772 goto set_level; 720 goto set_level;
773 } 721 }
774 722
@@ -831,15 +779,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
831 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { 779 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
832 device->cap._DDC = 1; 780 device->cap._DDC = 1;
833 } 781 }
834 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) {
835 device->cap._DCS = 1;
836 }
837 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) {
838 device->cap._DGS = 1;
839 }
840 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) {
841 device->cap._DSS = 1;
842 }
843 782
844 if (acpi_video_backlight_support()) { 783 if (acpi_video_backlight_support()) {
845 struct backlight_properties props; 784 struct backlight_properties props;
@@ -904,21 +843,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
904 printk(KERN_ERR PREFIX "Create sysfs link\n"); 843 printk(KERN_ERR PREFIX "Create sysfs link\n");
905 844
906 } 845 }
907
908 if (acpi_video_display_switch_support()) {
909
910 if (device->cap._DCS && device->cap._DSS) {
911 static int count;
912 char *name;
913 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
914 if (!name)
915 return;
916 count++;
917 device->output_dev = video_output_register(name,
918 NULL, device, &acpi_output_properties);
919 kfree(name);
920 }
921 }
922} 846}
923 847
924/* 848/*
@@ -1360,6 +1284,9 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
1360 if (!video_device) 1284 if (!video_device)
1361 continue; 1285 continue;
1362 1286
1287 if (!video_device->cap._DDC)
1288 continue;
1289
1363 if (type) { 1290 if (type) {
1364 switch (type) { 1291 switch (type) {
1365 case ACPI_VIDEO_DISPLAY_CRT: 1292 case ACPI_VIDEO_DISPLAY_CRT:
@@ -1452,7 +1379,6 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
1452 thermal_cooling_device_unregister(device->cooling_dev); 1379 thermal_cooling_device_unregister(device->cooling_dev);
1453 device->cooling_dev = NULL; 1380 device->cooling_dev = NULL;
1454 } 1381 }
1455 video_output_unregister(device->output_dev);
1456 1382
1457 return 0; 1383 return 0;
1458} 1384}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b83676126598..42d3d72dae85 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -17,15 +17,14 @@
17 * capabilities the graphics cards plugged in support. The check for general 17 * capabilities the graphics cards plugged in support. The check for general
18 * video capabilities will be triggered by the first caller of 18 * video capabilities will be triggered by the first caller of
19 * acpi_video_get_capabilities(NULL); which will happen when the first 19 * acpi_video_get_capabilities(NULL); which will happen when the first
20 * backlight (or display output) switching supporting driver calls: 20 * backlight switching supporting driver calls:
21 * acpi_video_backlight_support(); 21 * acpi_video_backlight_support();
22 * 22 *
23 * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) 23 * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B)
24 * are available, video.ko should be used to handle the device. 24 * are available, video.ko should be used to handle the device.
25 * 25 *
26 * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi, 26 * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi,
27 * sony_acpi,... can take care about backlight brightness and display output 27 * sony_acpi,... can take care about backlight brightness.
28 * switching.
29 * 28 *
30 * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) 29 * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
31 * this file will not be compiled, acpi_video_get_capabilities() and 30 * this file will not be compiled, acpi_video_get_capabilities() and
@@ -83,11 +82,6 @@ long acpi_is_video_device(struct acpi_device *device)
83 if (!device) 82 if (!device)
84 return 0; 83 return 0;
85 84
86 /* Is this device able to support video switching ? */
87 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
88 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
89 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
90
91 /* Is this device able to retrieve a video ROM ? */ 85 /* Is this device able to retrieve a video ROM ? */
92 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) 86 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
93 video_caps |= ACPI_VIDEO_ROM_AVAILABLE; 87 video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
@@ -161,8 +155,6 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
161 * 155 *
162 * if (dmi_name_in_vendors("XY")) { 156 * if (dmi_name_in_vendors("XY")) {
163 * acpi_video_support |= 157 * acpi_video_support |=
164 * ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR;
165 * acpi_video_support |=
166 * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; 158 * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
167 *} 159 *}
168 */ 160 */
@@ -212,33 +204,8 @@ int acpi_video_backlight_support(void)
212EXPORT_SYMBOL(acpi_video_backlight_support); 204EXPORT_SYMBOL(acpi_video_backlight_support);
213 205
214/* 206/*
215 * Returns true if video.ko can do display output switching. 207 * Use acpi_backlight=vendor/video to force that backlight switching
216 * This does not work well/at all with binary graphics drivers 208 * is processed by vendor specific acpi drivers or video.ko driver.
217 * which disable system io ranges and do it on their own.
218 */
219int acpi_video_display_switch_support(void)
220{
221 if (!acpi_video_caps_checked)
222 acpi_video_get_capabilities(NULL);
223
224 if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR)
225 return 0;
226 else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO)
227 return 1;
228
229 if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR)
230 return 0;
231 else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO)
232 return 1;
233
234 return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING;
235}
236EXPORT_SYMBOL(acpi_video_display_switch_support);
237
238/*
239 * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video
240 * To force that backlight or display output switching is processed by vendor
241 * specific acpi drivers or video.ko driver.
242 */ 209 */
243static int __init acpi_backlight(char *str) 210static int __init acpi_backlight(char *str)
244{ 211{
@@ -255,19 +222,3 @@ static int __init acpi_backlight(char *str)
255 return 1; 222 return 1;
256} 223}
257__setup("acpi_backlight=", acpi_backlight); 224__setup("acpi_backlight=", acpi_backlight);
258
259static int __init acpi_display_output(char *str)
260{
261 if (str == NULL || *str == '\0')
262 return 1;
263 else {
264 if (!strcmp("vendor", str))
265 acpi_video_support |=
266 ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR;
267 if (!strcmp("video", str))
268 acpi_video_support |=
269 ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
270 }
271 return 1;
272}
273__setup("acpi_display_output=", acpi_display_output);
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index f62a50c3ed34..ed6501452507 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -37,15 +37,16 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
37 container_of(node, struct acpi_device, wakeup_list); 37 container_of(node, struct acpi_device, wakeup_list);
38 38
39 if (!dev->wakeup.flags.valid 39 if (!dev->wakeup.flags.valid
40 || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) 40 || sleep_state > (u32) dev->wakeup.sleep_state
41 || sleep_state > (u32) dev->wakeup.sleep_state) 41 || !(device_may_wakeup(&dev->dev)
42 || dev->wakeup.prepare_count))
42 continue; 43 continue;
43 44
44 if (dev->wakeup.state.enabled) 45 if (device_may_wakeup(&dev->dev))
45 acpi_enable_wakeup_device_power(dev, sleep_state); 46 acpi_enable_wakeup_device_power(dev, sleep_state);
46 47
47 /* The wake-up power should have been enabled already. */ 48 /* The wake-up power should have been enabled already. */
48 acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, 49 acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
49 ACPI_GPE_ENABLE); 50 ACPI_GPE_ENABLE);
50 } 51 }
51} 52}
@@ -63,14 +64,15 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
63 container_of(node, struct acpi_device, wakeup_list); 64 container_of(node, struct acpi_device, wakeup_list);
64 65
65 if (!dev->wakeup.flags.valid 66 if (!dev->wakeup.flags.valid
66 || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) 67 || sleep_state > (u32) dev->wakeup.sleep_state
67 || (sleep_state > (u32) dev->wakeup.sleep_state)) 68 || !(device_may_wakeup(&dev->dev)
69 || dev->wakeup.prepare_count))
68 continue; 70 continue;
69 71
70 acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, 72 acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
71 ACPI_GPE_DISABLE); 73 ACPI_GPE_DISABLE);
72 74
73 if (dev->wakeup.state.enabled) 75 if (device_may_wakeup(&dev->dev))
74 acpi_disable_wakeup_device_power(dev); 76 acpi_disable_wakeup_device_power(dev);
75 } 77 }
76} 78}
@@ -84,8 +86,8 @@ int __init acpi_wakeup_device_init(void)
84 struct acpi_device *dev = container_of(node, 86 struct acpi_device *dev = container_of(node,
85 struct acpi_device, 87 struct acpi_device,
86 wakeup_list); 88 wakeup_list);
87 if (dev->wakeup.flags.always_enabled) 89 if (device_can_wakeup(&dev->dev))
88 dev->wakeup.state.enabled = 1; 90 device_set_wakeup_enable(&dev->dev, true);
89 } 91 }
90 mutex_unlock(&acpi_device_lock); 92 mutex_unlock(&acpi_device_lock);
91 return 0; 93 return 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c6b298d4c136..c2328aed0836 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -783,7 +783,7 @@ config PATA_PCMCIA
783 783
784config PATA_PLATFORM 784config PATA_PLATFORM
785 tristate "Generic platform device PATA support" 785 tristate "Generic platform device PATA support"
786 depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM 786 depends on EXPERT || PPC || HAVE_PATA_PLATFORM
787 help 787 help
788 This option enables support for generic directly connected ATA 788 This option enables support for generic directly connected ATA
789 devices commonly found on embedded systems. 789 devices commonly found on embedded systems.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 328826381a2d..b8d96ce37fc9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -260,6 +260,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ 260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ 261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ 262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
263 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
263 264
264 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 265 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
265 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 266 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -379,6 +380,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
379 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ 380 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
380 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ 381 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
381 { PCI_DEVICE(0x1b4b, 0x9123), 382 { PCI_DEVICE(0x1b4b, 0x9123),
383 .class = PCI_CLASS_STORAGE_SATA_AHCI,
384 .class_mask = 0xffffff,
382 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ 385 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
383 386
384 /* Promise */ 387 /* Promise */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a31fe96f7de6..d4e52e214859 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4138,6 +4138,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4138 * device and controller are SATA. 4138 * device and controller are SATA.
4139 */ 4139 */
4140 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, 4140 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
4141 { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER },
4141 4142
4142 /* End Marker */ 4143 /* End Marker */
4143 { } 4144 { }
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5defc74973d7..600f6353ecf8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1099,9 +1099,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1099 struct request_queue *q = sdev->request_queue; 1099 struct request_queue *q = sdev->request_queue;
1100 void *buf; 1100 void *buf;
1101 1101
1102 /* set the min alignment and padding */ 1102 sdev->sector_size = ATA_SECT_SIZE;
1103 blk_queue_update_dma_alignment(sdev->request_queue, 1103
1104 ATA_DMA_PAD_SZ - 1); 1104 /* set DMA padding */
1105 blk_queue_update_dma_pad(sdev->request_queue, 1105 blk_queue_update_dma_pad(sdev->request_queue,
1106 ATA_DMA_PAD_SZ - 1); 1106 ATA_DMA_PAD_SZ - 1);
1107 1107
@@ -1115,13 +1115,25 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1115 1115
1116 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); 1116 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
1117 } else { 1117 } else {
1118 /* ATA devices must be sector aligned */
1119 sdev->sector_size = ata_id_logical_sector_size(dev->id); 1118 sdev->sector_size = ata_id_logical_sector_size(dev->id);
1120 blk_queue_update_dma_alignment(sdev->request_queue,
1121 sdev->sector_size - 1);
1122 sdev->manage_start_stop = 1; 1119 sdev->manage_start_stop = 1;
1123 } 1120 }
1124 1121
1122 /*
1123 * ata_pio_sectors() expects buffer for each sector to not cross
1124 * page boundary. Enforce it by requiring buffers to be sector
1125 * aligned, which works iff sector_size is not larger than
1126 * PAGE_SIZE. ATAPI devices also need the alignment as
1127 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
1128 */
1129 if (sdev->sector_size > PAGE_SIZE)
1130 ata_dev_printk(dev, KERN_WARNING,
1131 "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
1132 sdev->sector_size);
1133
1134 blk_queue_update_dma_alignment(sdev->request_queue,
1135 sdev->sector_size - 1);
1136
1125 if (dev->flags & ATA_DFLAG_AN) 1137 if (dev->flags & ATA_DFLAG_AN)
1126 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); 1138 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
1127 1139
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index d7e57db36bc8..538ec38ba995 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -25,7 +25,7 @@
25#include <linux/libata.h> 25#include <linux/libata.h>
26 26
27#define DRV_NAME "pata_hpt366" 27#define DRV_NAME "pata_hpt366"
28#define DRV_VERSION "0.6.9" 28#define DRV_VERSION "0.6.10"
29 29
30struct hpt_clock { 30struct hpt_clock {
31 u8 xfer_mode; 31 u8 xfer_mode;
@@ -160,8 +160,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
160 160
161 while (list[i] != NULL) { 161 while (list[i] != NULL) {
162 if (!strcmp(list[i], model_num)) { 162 if (!strcmp(list[i], model_num)) {
163 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", 163 pr_warning(DRV_NAME ": %s is not supported for %s.\n",
164 modestr, list[i]); 164 modestr, list[i]);
165 return 1; 165 return 1;
166 } 166 }
167 i++; 167 i++;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index efdd18bc8663..4c5b5183225e 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -24,7 +24,7 @@
24#include <linux/libata.h> 24#include <linux/libata.h>
25 25
26#define DRV_NAME "pata_hpt37x" 26#define DRV_NAME "pata_hpt37x"
27#define DRV_VERSION "0.6.18" 27#define DRV_VERSION "0.6.22"
28 28
29struct hpt_clock { 29struct hpt_clock {
30 u8 xfer_speed; 30 u8 xfer_speed;
@@ -229,8 +229,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
229 229
230 while (list[i] != NULL) { 230 while (list[i] != NULL) {
231 if (!strcmp(list[i], model_num)) { 231 if (!strcmp(list[i], model_num)) {
232 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", 232 pr_warning(DRV_NAME ": %s is not supported for %s.\n",
233 modestr, list[i]); 233 modestr, list[i]);
234 return 1; 234 return 1;
235 } 235 }
236 i++; 236 i++;
@@ -642,7 +642,6 @@ static struct ata_port_operations hpt372_port_ops = {
642static struct ata_port_operations hpt374_fn1_port_ops = { 642static struct ata_port_operations hpt374_fn1_port_ops = {
643 .inherits = &hpt372_port_ops, 643 .inherits = &hpt372_port_ops,
644 .cable_detect = hpt374_fn1_cable_detect, 644 .cable_detect = hpt374_fn1_cable_detect,
645 .prereset = hpt37x_pre_reset,
646}; 645};
647 646
648/** 647/**
@@ -803,7 +802,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
803 .udma_mask = ATA_UDMA6, 802 .udma_mask = ATA_UDMA6,
804 .port_ops = &hpt302_port_ops 803 .port_ops = &hpt302_port_ops
805 }; 804 };
806 /* HPT374 - UDMA100, function 1 uses different prereset method */ 805 /* HPT374 - UDMA100, function 1 uses different cable_detect method */
807 static const struct ata_port_info info_hpt374_fn0 = { 806 static const struct ata_port_info info_hpt374_fn0 = {
808 .flags = ATA_FLAG_SLAVE_POSS, 807 .flags = ATA_FLAG_SLAVE_POSS,
809 .pio_mask = ATA_PIO4, 808 .pio_mask = ATA_PIO4,
@@ -838,7 +837,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
838 if (rc) 837 if (rc)
839 return rc; 838 return rc;
840 839
841 if (dev->device == PCI_DEVICE_ID_TTI_HPT366) { 840 switch (dev->device) {
841 case PCI_DEVICE_ID_TTI_HPT366:
842 /* May be a later chip in disguise. Check */ 842 /* May be a later chip in disguise. Check */
843 /* Older chips are in the HPT366 driver. Ignore them */ 843 /* Older chips are in the HPT366 driver. Ignore them */
844 if (rev < 3) 844 if (rev < 3)
@@ -863,54 +863,50 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
863 chip_table = &hpt372; 863 chip_table = &hpt372;
864 break; 864 break;
865 default: 865 default:
866 printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype, " 866 pr_err(DRV_NAME ": Unknown HPT366 subtype, "
867 "please report (%d).\n", rev); 867 "please report (%d).\n", rev);
868 return -ENODEV; 868 return -ENODEV;
869 } 869 }
870 } else { 870 break;
871 switch (dev->device) { 871 case PCI_DEVICE_ID_TTI_HPT372:
872 case PCI_DEVICE_ID_TTI_HPT372: 872 /* 372N if rev >= 2 */
873 /* 372N if rev >= 2 */ 873 if (rev >= 2)
874 if (rev >= 2) 874 return -ENODEV;
875 return -ENODEV; 875 ppi[0] = &info_hpt372;
876 ppi[0] = &info_hpt372; 876 chip_table = &hpt372a;
877 chip_table = &hpt372a; 877 break;
878 break; 878 case PCI_DEVICE_ID_TTI_HPT302:
879 case PCI_DEVICE_ID_TTI_HPT302: 879 /* 302N if rev > 1 */
880 /* 302N if rev > 1 */ 880 if (rev > 1)
881 if (rev > 1) 881 return -ENODEV;
882 return -ENODEV; 882 ppi[0] = &info_hpt302;
883 ppi[0] = &info_hpt302; 883 /* Check this */
884 /* Check this */ 884 chip_table = &hpt302;
885 chip_table = &hpt302; 885 break;
886 break; 886 case PCI_DEVICE_ID_TTI_HPT371:
887 case PCI_DEVICE_ID_TTI_HPT371: 887 if (rev > 1)
888 if (rev > 1) 888 return -ENODEV;
889 return -ENODEV; 889 ppi[0] = &info_hpt302;
890 ppi[0] = &info_hpt302; 890 chip_table = &hpt371;
891 chip_table = &hpt371; 891 /*
892 /* 892 * Single channel device, master is not present but the BIOS
893 * Single channel device, master is not present 893 * (or us for non x86) must mark it absent
894 * but the BIOS (or us for non x86) must mark it 894 */
895 * absent 895 pci_read_config_byte(dev, 0x50, &mcr1);
896 */ 896 mcr1 &= ~0x04;
897 pci_read_config_byte(dev, 0x50, &mcr1); 897 pci_write_config_byte(dev, 0x50, mcr1);
898 mcr1 &= ~0x04; 898 break;
899 pci_write_config_byte(dev, 0x50, mcr1); 899 case PCI_DEVICE_ID_TTI_HPT374:
900 break; 900 chip_table = &hpt374;
901 case PCI_DEVICE_ID_TTI_HPT374: 901 if (!(PCI_FUNC(dev->devfn) & 1))
902 chip_table = &hpt374; 902 *ppi = &info_hpt374_fn0;
903 if (!(PCI_FUNC(dev->devfn) & 1)) 903 else
904 *ppi = &info_hpt374_fn0; 904 *ppi = &info_hpt374_fn1;
905 else 905 break;
906 *ppi = &info_hpt374_fn1; 906 default:
907 break; 907 pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
908 default: 908 dev->device);
909 printk(KERN_ERR 909 return -ENODEV;
910 "pata_hpt37x: PCI table is bogus, please report (%d).\n",
911 dev->device);
912 return -ENODEV;
913 }
914 } 910 }
915 /* Ok so this is a chip we support */ 911 /* Ok so this is a chip we support */
916 912
@@ -957,8 +953,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
957 u8 sr; 953 u8 sr;
958 u32 total = 0; 954 u32 total = 0;
959 955
960 printk(KERN_WARNING 956 pr_warning(DRV_NAME ": BIOS has not set timing clocks.\n");
961 "pata_hpt37x: BIOS has not set timing clocks.\n");
962 957
963 /* This is the process the HPT371 BIOS is reported to use */ 958 /* This is the process the HPT371 BIOS is reported to use */
964 for (i = 0; i < 128; i++) { 959 for (i = 0; i < 128; i++) {
@@ -1014,7 +1009,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1014 (f_high << 16) | f_low | 0x100); 1009 (f_high << 16) | f_low | 0x100);
1015 } 1010 }
1016 if (adjust == 8) { 1011 if (adjust == 8) {
1017 printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n"); 1012 pr_err(DRV_NAME ": DPLL did not stabilize!\n");
1018 return -ENODEV; 1013 return -ENODEV;
1019 } 1014 }
1020 if (dpll == 3) 1015 if (dpll == 3)
@@ -1022,8 +1017,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1022 else 1017 else
1023 private_data = (void *)hpt37x_timings_50; 1018 private_data = (void *)hpt37x_timings_50;
1024 1019
1025 printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n", 1020 pr_info(DRV_NAME ": bus clock %dMHz, using %dMHz DPLL.\n",
1026 MHz[clock_slot], MHz[dpll]); 1021 MHz[clock_slot], MHz[dpll]);
1027 } else { 1022 } else {
1028 private_data = (void *)chip_table->clocks[clock_slot]; 1023 private_data = (void *)chip_table->clocks[clock_slot];
1029 /* 1024 /*
@@ -1036,8 +1031,9 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1036 ppi[0] = &info_hpt370_33; 1031 ppi[0] = &info_hpt370_33;
1037 if (clock_slot < 2 && ppi[0] == &info_hpt370a) 1032 if (clock_slot < 2 && ppi[0] == &info_hpt370a)
1038 ppi[0] = &info_hpt370a_33; 1033 ppi[0] = &info_hpt370a_33;
1039 printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n", 1034
1040 chip_table->name, MHz[clock_slot]); 1035 pr_info(DRV_NAME ": %s using %dMHz bus clock.\n",
1036 chip_table->name, MHz[clock_slot]);
1041 } 1037 }
1042 1038
1043 /* Now kick off ATA set up */ 1039 /* Now kick off ATA set up */
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index d2239bbdb798..eca68caf5f46 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -25,7 +25,7 @@
25#include <linux/libata.h> 25#include <linux/libata.h>
26 26
27#define DRV_NAME "pata_hpt3x2n" 27#define DRV_NAME "pata_hpt3x2n"
28#define DRV_VERSION "0.3.13" 28#define DRV_VERSION "0.3.14"
29 29
30enum { 30enum {
31 HPT_PCI_FAST = (1 << 31), 31 HPT_PCI_FAST = (1 << 31),
@@ -418,7 +418,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
418 u16 sr; 418 u16 sr;
419 u32 total = 0; 419 u32 total = 0;
420 420
421 printk(KERN_WARNING "pata_hpt3x2n: BIOS clock data not set.\n"); 421 pr_warning(DRV_NAME ": BIOS clock data not set.\n");
422 422
423 /* This is the process the HPT371 BIOS is reported to use */ 423 /* This is the process the HPT371 BIOS is reported to use */
424 for (i = 0; i < 128; i++) { 424 for (i = 0; i < 128; i++) {
@@ -528,8 +528,7 @@ hpt372n:
528 ppi[0] = &info_hpt372n; 528 ppi[0] = &info_hpt372n;
529 break; 529 break;
530 default: 530 default:
531 printk(KERN_ERR 531 pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
532 "pata_hpt3x2n: PCI table is bogus please report (%d).\n",
533 dev->device); 532 dev->device);
534 return -ENODEV; 533 return -ENODEV;
535 } 534 }
@@ -579,12 +578,11 @@ hpt372n:
579 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low); 578 pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
580 } 579 }
581 if (adjust == 8) { 580 if (adjust == 8) {
582 printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n"); 581 pr_err(DRV_NAME ": DPLL did not stabilize!\n");
583 return -ENODEV; 582 return -ENODEV;
584 } 583 }
585 584
586 printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n", 585 pr_info(DRV_NAME ": bus clock %dMHz, using 66MHz DPLL.\n", pci_mhz);
587 pci_mhz);
588 586
589 /* 587 /*
590 * Set our private data up. We only need a few flags 588 * Set our private data up. We only need a few flags
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 8cc536e49a0a..d7d8026cde99 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
610}; 610};
611 611
612static struct ata_port_operations mpc52xx_ata_port_ops = { 612static struct ata_port_operations mpc52xx_ata_port_ops = {
613 .inherits = &ata_sff_port_ops, 613 .inherits = &ata_bmdma_port_ops,
614 .sff_dev_select = mpc52xx_ata_dev_select, 614 .sff_dev_select = mpc52xx_ata_dev_select,
615 .set_piomode = mpc52xx_ata_set_piomode, 615 .set_piomode = mpc52xx_ata_set_piomode,
616 .set_dmamode = mpc52xx_ata_set_dmamode, 616 .set_dmamode = mpc52xx_ata_set_dmamode,
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index bca9cb89a118..487a54739854 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -151,7 +151,7 @@ static int fetch_stats(struct atm_dev *dev,struct idt77105_stats __user *arg,int
151 spin_unlock_irqrestore(&idt77105_priv_lock, flags); 151 spin_unlock_irqrestore(&idt77105_priv_lock, flags);
152 if (arg == NULL) 152 if (arg == NULL)
153 return 0; 153 return 0;
154 return copy_to_user(arg, &PRIV(dev)->stats, 154 return copy_to_user(arg, &stats,
155 sizeof(struct idt77105_stats)) ? -EFAULT : 0; 155 sizeof(struct idt77105_stats)) ? -EFAULT : 0;
156} 156}
157 157
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd96345bc35c..d57e8d0fb823 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -70,7 +70,7 @@ config PREVENT_FIRMWARE_BUILD
70 If unsure say Y here. 70 If unsure say Y here.
71 71
72config FW_LOADER 72config FW_LOADER
73 tristate "Userspace firmware loading support" if EMBEDDED 73 tristate "Userspace firmware loading support" if EXPERT
74 default y 74 default y
75 ---help--- 75 ---help---
76 This option is provided for the case where no in-kernel-tree modules 76 This option is provided for the case where no in-kernel-tree modules
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ce012a9c6201..36b43052001d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -117,12 +117,21 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
117 "Node %d WritebackTmp: %8lu kB\n" 117 "Node %d WritebackTmp: %8lu kB\n"
118 "Node %d Slab: %8lu kB\n" 118 "Node %d Slab: %8lu kB\n"
119 "Node %d SReclaimable: %8lu kB\n" 119 "Node %d SReclaimable: %8lu kB\n"
120 "Node %d SUnreclaim: %8lu kB\n", 120 "Node %d SUnreclaim: %8lu kB\n"
121#ifdef CONFIG_TRANSPARENT_HUGEPAGE
122 "Node %d AnonHugePages: %8lu kB\n"
123#endif
124 ,
121 nid, K(node_page_state(nid, NR_FILE_DIRTY)), 125 nid, K(node_page_state(nid, NR_FILE_DIRTY)),
122 nid, K(node_page_state(nid, NR_WRITEBACK)), 126 nid, K(node_page_state(nid, NR_WRITEBACK)),
123 nid, K(node_page_state(nid, NR_FILE_PAGES)), 127 nid, K(node_page_state(nid, NR_FILE_PAGES)),
124 nid, K(node_page_state(nid, NR_FILE_MAPPED)), 128 nid, K(node_page_state(nid, NR_FILE_MAPPED)),
125 nid, K(node_page_state(nid, NR_ANON_PAGES)), 129 nid, K(node_page_state(nid, NR_ANON_PAGES)
130#ifdef CONFIG_TRANSPARENT_HUGEPAGE
131 + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
132 HPAGE_PMD_NR
133#endif
134 ),
126 nid, K(node_page_state(nid, NR_SHMEM)), 135 nid, K(node_page_state(nid, NR_SHMEM)),
127 nid, node_page_state(nid, NR_KERNEL_STACK) * 136 nid, node_page_state(nid, NR_KERNEL_STACK) *
128 THREAD_SIZE / 1024, 137 THREAD_SIZE / 1024,
@@ -133,7 +142,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
133 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + 142 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
134 node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 143 node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
135 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), 144 nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
136 nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); 145 nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
146#ifdef CONFIG_TRANSPARENT_HUGEPAGE
147 , nid,
148 K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
149 HPAGE_PMD_NR)
150#endif
151 );
137 n += hugetlb_report_node_meminfo(nid, buf + n); 152 n += hugetlb_report_node_meminfo(nid, buf + n);
138 return n; 153 return n;
139} 154}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 656493a5e073..42615b419dfb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -407,12 +407,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
407 goto out; 407 goto out;
408 } 408 }
409 409
410 /* Maybe the parent is now able to suspend. */
410 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { 411 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
411 spin_unlock_irq(&dev->power.lock); 412 spin_unlock(&dev->power.lock);
412 413
413 pm_request_idle(parent); 414 spin_lock(&parent->power.lock);
415 rpm_idle(parent, RPM_ASYNC);
416 spin_unlock(&parent->power.lock);
414 417
415 spin_lock_irq(&dev->power.lock); 418 spin_lock(&dev->power.lock);
416 } 419 }
417 420
418 out: 421 out:
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index d7f463d6312d..40528ba56d1b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
39obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ 39obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
40obj-$(CONFIG_BLK_DEV_RBD) += rbd.o 40obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
41 41
42swim_mod-objs := swim.o swim_asm.o 42swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile
index e76d997183c6..06ea82cdf27d 100644
--- a/drivers/block/aoe/Makefile
+++ b/drivers/block/aoe/Makefile
@@ -3,4 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_ATA_OVER_ETH) += aoe.o 5obj-$(CONFIG_ATA_OVER_ETH) += aoe.o
6aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o 6aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 516d5bbec2b6..9279272b3732 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk)
2833 sector_t total_size; 2833 sector_t total_size;
2834 InquiryData_struct *inq_buff = NULL; 2834 InquiryData_struct *inq_buff = NULL;
2835 2835
2836 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { 2836 for (logvol = 0; logvol <= h->highest_lun; logvol++) {
2837 if (!h->drv[logvol]) 2837 if (!h->drv[logvol])
2838 continue; 2838 continue;
2839 if (memcmp(h->drv[logvol]->LunID, drv->LunID, 2839 if (memcmp(h->drv[logvol]->LunID, drv->LunID,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44e18c073c44..49e6a545eb63 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1641,6 +1641,9 @@ out:
1641 1641
1642static void loop_free(struct loop_device *lo) 1642static void loop_free(struct loop_device *lo)
1643{ 1643{
1644 if (!lo->lo_queue->queue_lock)
1645 lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
1646
1644 blk_cleanup_queue(lo->lo_queue); 1647 blk_cleanup_queue(lo->lo_queue);
1645 put_disk(lo->lo_disk); 1648 put_disk(lo->lo_disk);
1646 list_del(&lo->lo_list); 1649 list_del(&lo->lo_list);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a32fb41246f8..e6fc716aca45 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -53,7 +53,6 @@
53#define DBG_BLKDEV 0x0100 53#define DBG_BLKDEV 0x0100
54#define DBG_RX 0x0200 54#define DBG_RX 0x0200
55#define DBG_TX 0x0400 55#define DBG_TX 0x0400
56static DEFINE_MUTEX(nbd_mutex);
57static unsigned int debugflags; 56static unsigned int debugflags;
58#endif /* NDEBUG */ 57#endif /* NDEBUG */
59 58
@@ -718,11 +717,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
718 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", 717 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
719 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); 718 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
720 719
721 mutex_lock(&nbd_mutex);
722 mutex_lock(&lo->tx_lock); 720 mutex_lock(&lo->tx_lock);
723 error = __nbd_ioctl(bdev, lo, cmd, arg); 721 error = __nbd_ioctl(bdev, lo, cmd, arg);
724 mutex_unlock(&lo->tx_lock); 722 mutex_unlock(&lo->tx_lock);
725 mutex_unlock(&nbd_mutex);
726 723
727 return error; 724 return error;
728} 725}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 949ed09c6361..a126e614601f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -47,46 +47,40 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
47#define USB_REQ_DFU_DNLOAD 1 47#define USB_REQ_DFU_DNLOAD 1
48#define BULK_SIZE 4096 48#define BULK_SIZE 4096
49 49
50struct ath3k_data { 50static int ath3k_load_firmware(struct usb_device *udev,
51 struct usb_device *udev; 51 const struct firmware *firmware)
52 u8 *fw_data;
53 u32 fw_size;
54 u32 fw_sent;
55};
56
57static int ath3k_load_firmware(struct ath3k_data *data,
58 unsigned char *firmware,
59 int count)
60{ 52{
61 u8 *send_buf; 53 u8 *send_buf;
62 int err, pipe, len, size, sent = 0; 54 int err, pipe, len, size, sent = 0;
55 int count = firmware->size;
63 56
64 BT_DBG("ath3k %p udev %p", data, data->udev); 57 BT_DBG("udev %p", udev);
65 58
66 pipe = usb_sndctrlpipe(data->udev, 0); 59 pipe = usb_sndctrlpipe(udev, 0);
67 60
68 if ((usb_control_msg(data->udev, pipe, 61 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
62 if (!send_buf) {
63 BT_ERR("Can't allocate memory chunk for firmware");
64 return -ENOMEM;
65 }
66
67 memcpy(send_buf, firmware->data, 20);
68 if ((err = usb_control_msg(udev, pipe,
69 USB_REQ_DFU_DNLOAD, 69 USB_REQ_DFU_DNLOAD,
70 USB_TYPE_VENDOR, 0, 0, 70 USB_TYPE_VENDOR, 0, 0,
71 firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) { 71 send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
72 BT_ERR("Can't change to loading configuration err"); 72 BT_ERR("Can't change to loading configuration err");
73 return -EBUSY; 73 goto error;
74 } 74 }
75 sent += 20; 75 sent += 20;
76 count -= 20; 76 count -= 20;
77 77
78 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
79 if (!send_buf) {
80 BT_ERR("Can't allocate memory chunk for firmware");
81 return -ENOMEM;
82 }
83
84 while (count) { 78 while (count) {
85 size = min_t(uint, count, BULK_SIZE); 79 size = min_t(uint, count, BULK_SIZE);
86 pipe = usb_sndbulkpipe(data->udev, 0x02); 80 pipe = usb_sndbulkpipe(udev, 0x02);
87 memcpy(send_buf, firmware + sent, size); 81 memcpy(send_buf, firmware->data + sent, size);
88 82
89 err = usb_bulk_msg(data->udev, pipe, send_buf, size, 83 err = usb_bulk_msg(udev, pipe, send_buf, size,
90 &len, 3000); 84 &len, 3000);
91 85
92 if (err || (len != size)) { 86 if (err || (len != size)) {
@@ -112,57 +106,28 @@ static int ath3k_probe(struct usb_interface *intf,
112{ 106{
113 const struct firmware *firmware; 107 const struct firmware *firmware;
114 struct usb_device *udev = interface_to_usbdev(intf); 108 struct usb_device *udev = interface_to_usbdev(intf);
115 struct ath3k_data *data;
116 int size;
117 109
118 BT_DBG("intf %p id %p", intf, id); 110 BT_DBG("intf %p id %p", intf, id);
119 111
120 if (intf->cur_altsetting->desc.bInterfaceNumber != 0) 112 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
121 return -ENODEV; 113 return -ENODEV;
122 114
123 data = kzalloc(sizeof(*data), GFP_KERNEL);
124 if (!data)
125 return -ENOMEM;
126
127 data->udev = udev;
128
129 if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { 115 if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
130 kfree(data);
131 return -EIO; 116 return -EIO;
132 } 117 }
133 118
134 size = max_t(uint, firmware->size, 4096); 119 if (ath3k_load_firmware(udev, firmware)) {
135 data->fw_data = kmalloc(size, GFP_KERNEL);
136 if (!data->fw_data) {
137 release_firmware(firmware); 120 release_firmware(firmware);
138 kfree(data);
139 return -ENOMEM;
140 }
141
142 memcpy(data->fw_data, firmware->data, firmware->size);
143 data->fw_size = firmware->size;
144 data->fw_sent = 0;
145 release_firmware(firmware);
146
147 usb_set_intfdata(intf, data);
148 if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
149 usb_set_intfdata(intf, NULL);
150 kfree(data->fw_data);
151 kfree(data);
152 return -EIO; 121 return -EIO;
153 } 122 }
123 release_firmware(firmware);
154 124
155 return 0; 125 return 0;
156} 126}
157 127
158static void ath3k_disconnect(struct usb_interface *intf) 128static void ath3k_disconnect(struct usb_interface *intf)
159{ 129{
160 struct ath3k_data *data = usb_get_intfdata(intf);
161
162 BT_DBG("ath3k_disconnect intf %p", intf); 130 BT_DBG("ath3k_disconnect intf %p", intf);
163
164 kfree(data->fw_data);
165 kfree(data);
166} 131}
167 132
168static struct usb_driver ath3k_driver = { 133static struct usb_driver ath3k_driver = {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 14033a36bcd0..e2c48a7eccff 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -409,7 +409,8 @@ int register_cdrom(struct cdrom_device_info *cdi)
409 } 409 }
410 410
411 ENSURE(drive_status, CDC_DRIVE_STATUS ); 411 ENSURE(drive_status, CDC_DRIVE_STATUS );
412 ENSURE(media_changed, CDC_MEDIA_CHANGED); 412 if (cdo->check_events == NULL && cdo->media_changed == NULL)
413 *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
413 ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); 414 ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
414 ENSURE(lock_door, CDC_LOCK); 415 ENSURE(lock_door, CDC_LOCK);
415 ENSURE(select_speed, CDC_SELECT_SPEED); 416 ENSURE(select_speed, CDC_SELECT_SPEED);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0f175a866ef0..b7980a83ce2d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -5,7 +5,7 @@
5menu "Character devices" 5menu "Character devices"
6 6
7config VT 7config VT
8 bool "Virtual terminal" if EMBEDDED 8 bool "Virtual terminal" if EXPERT
9 depends on !S390 9 depends on !S390
10 select INPUT 10 select INPUT
11 default y 11 default y
@@ -39,13 +39,13 @@ config VT
39config CONSOLE_TRANSLATIONS 39config CONSOLE_TRANSLATIONS
40 depends on VT 40 depends on VT
41 default y 41 default y
42 bool "Enable character translations in console" if EMBEDDED 42 bool "Enable character translations in console" if EXPERT
43 ---help--- 43 ---help---
44 This enables support for font mapping and Unicode translation 44 This enables support for font mapping and Unicode translation
45 on virtual consoles. 45 on virtual consoles.
46 46
47config VT_CONSOLE 47config VT_CONSOLE
48 bool "Support for console on virtual terminal" if EMBEDDED 48 bool "Support for console on virtual terminal" if EXPERT
49 depends on VT 49 depends on VT
50 default y 50 default y
51 ---help--- 51 ---help---
@@ -426,10 +426,10 @@ config SGI_MBCS
426 If you have an SGI Altix with an attached SABrick 426 If you have an SGI Altix with an attached SABrick
427 say Y or M here, otherwise say N. 427 say Y or M here, otherwise say N.
428 428
429source "drivers/serial/Kconfig" 429source "drivers/tty/serial/Kconfig"
430 430
431config UNIX98_PTYS 431config UNIX98_PTYS
432 bool "Unix98 PTY support" if EMBEDDED 432 bool "Unix98 PTY support" if EXPERT
433 default y 433 default y
434 ---help--- 434 ---help---
435 A pseudo terminal (PTY) is a software device consisting of two 435 A pseudo terminal (PTY) is a software device consisting of two
@@ -495,7 +495,7 @@ config LEGACY_PTY_COUNT
495 495
496config TTY_PRINTK 496config TTY_PRINTK
497 bool "TTY driver to output user messages via printk" 497 bool "TTY driver to output user messages via printk"
498 depends on EMBEDDED 498 depends on EXPERT
499 default n 499 default n
500 ---help--- 500 ---help---
501 If you say Y here, the support for writing user messages (i.e. 501 If you say Y here, the support for writing user messages (i.e.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 1e9dffb33778..8238f89f73c9 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -30,17 +30,6 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
30obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o 30obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
31obj-$(CONFIG_SX) += sx.o generic_serial.o 31obj-$(CONFIG_SX) += sx.o generic_serial.o
32obj-$(CONFIG_RIO) += rio/ generic_serial.o 32obj-$(CONFIG_RIO) += rio/ generic_serial.o
33obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
34obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
35obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
36obj-$(CONFIG_HVC_TILE) += hvc_tile.o
37obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
38obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
39obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
40obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
41obj-$(CONFIG_HVC_XEN) += hvc_xen.o
42obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
43obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
44obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o 33obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
45obj-$(CONFIG_RAW_DRIVER) += raw.o 34obj-$(CONFIG_RAW_DRIVER) += raw.o
46obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o 35obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
@@ -48,7 +37,6 @@ obj-$(CONFIG_MSPEC) += mspec.o
48obj-$(CONFIG_MMTIMER) += mmtimer.o 37obj-$(CONFIG_MMTIMER) += mmtimer.o
49obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o 38obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
50obj-$(CONFIG_VIOTAPE) += viotape.o 39obj-$(CONFIG_VIOTAPE) += viotape.o
51obj-$(CONFIG_HVCS) += hvcs.o
52obj-$(CONFIG_IBM_BSR) += bsr.o 40obj-$(CONFIG_IBM_BSR) += bsr.o
53obj-$(CONFIG_SGI_MBCS) += mbcs.o 41obj-$(CONFIG_SGI_MBCS) += mbcs.o
54obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o 42obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index fcd867d923ba..d8b1b576556c 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -50,7 +50,7 @@ config AGP_ATI
50 50
51config AGP_AMD 51config AGP_AMD
52 tristate "AMD Irongate, 761, and 762 chipset support" 52 tristate "AMD Irongate, 761, and 762 chipset support"
53 depends on AGP && (X86_32 || ALPHA) 53 depends on AGP && X86_32
54 help 54 help
55 This option gives you AGP support for the GLX component of 55 This option gives you AGP support for the GLX component of
56 X on AMD Irongate, 761, and 762 chipsets. 56 X on AMD Irongate, 761, and 762 chipsets.
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b1b4362bc648..45681c0ff3b6 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -41,22 +41,8 @@ static int amd_create_page_map(struct amd_page_map *page_map)
41 if (page_map->real == NULL) 41 if (page_map->real == NULL)
42 return -ENOMEM; 42 return -ENOMEM;
43 43
44#ifndef CONFIG_X86
45 SetPageReserved(virt_to_page(page_map->real));
46 global_cache_flush();
47 page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
48 PAGE_SIZE);
49 if (page_map->remapped == NULL) {
50 ClearPageReserved(virt_to_page(page_map->real));
51 free_page((unsigned long) page_map->real);
52 page_map->real = NULL;
53 return -ENOMEM;
54 }
55 global_cache_flush();
56#else
57 set_memory_uc((unsigned long)page_map->real, 1); 44 set_memory_uc((unsigned long)page_map->real, 1);
58 page_map->remapped = page_map->real; 45 page_map->remapped = page_map->real;
59#endif
60 46
61 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { 47 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
62 writel(agp_bridge->scratch_page, page_map->remapped+i); 48 writel(agp_bridge->scratch_page, page_map->remapped+i);
@@ -68,12 +54,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
68 54
69static void amd_free_page_map(struct amd_page_map *page_map) 55static void amd_free_page_map(struct amd_page_map *page_map)
70{ 56{
71#ifndef CONFIG_X86
72 iounmap(page_map->remapped);
73 ClearPageReserved(virt_to_page(page_map->real));
74#else
75 set_memory_wb((unsigned long)page_map->real, 1); 57 set_memory_wb((unsigned long)page_map->real, 1);
76#endif
77 free_page((unsigned long) page_map->real); 58 free_page((unsigned long) page_map->real);
78} 59}
79 60
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 07e9796fead7..b0a0dccc98c1 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -717,8 +717,8 @@ static const struct intel_agp_driver_description {
717 { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver }, 717 { PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
718 { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver }, 718 { PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
719 { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver }, 719 { PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
720 { PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver }, 720 { PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver },
721 { PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver }, 721 { PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver },
722 { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver }, 722 { PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
723 { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver }, 723 { PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
724 { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver }, 724 { PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
@@ -774,20 +774,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
774 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); 774 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
775 775
776 /* 776 /*
777 * If the device has not been properly setup, the following will catch
778 * the problem and should stop the system from crashing.
779 * 20030610 - hamish@zot.org
780 */
781 if (pci_enable_device(pdev)) {
782 dev_err(&pdev->dev, "can't enable PCI device\n");
783 agp_put_bridge(bridge);
784 return -ENODEV;
785 }
786
787 /*
788 * The following fixes the case where the BIOS has "forgotten" to 777 * The following fixes the case where the BIOS has "forgotten" to
789 * provide an address range for the GART. 778 * provide an address range for the GART.
790 * 20030610 - hamish@zot.org 779 * 20030610 - hamish@zot.org
780 * This happens before pci_enable_device() intentionally;
781 * calling pci_enable_device() before assigning the resource
782 * will result in the GART being disabled on machines with such
783 * BIOSs (the GART ends up with a BAR starting at 0, which
784 * conflicts a lot of other devices).
791 */ 785 */
792 r = &pdev->resource[0]; 786 r = &pdev->resource[0];
793 if (!r->start && r->end) { 787 if (!r->start && r->end) {
@@ -798,6 +792,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
798 } 792 }
799 } 793 }
800 794
795 /*
796 * If the device has not been properly setup, the following will catch
797 * the problem and should stop the system from crashing.
798 * 20030610 - hamish@zot.org
799 */
800 if (pci_enable_device(pdev)) {
801 dev_err(&pdev->dev, "can't enable PCI device\n");
802 agp_put_bridge(bridge);
803 return -ENODEV;
804 }
805
801 /* Fill in the mode register */ 806 /* Fill in the mode register */
802 if (cap_ptr) { 807 if (cap_ptr) {
803 pci_read_config_dword(pdev, 808 pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index e921b693412b..fab3d3265adb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -68,6 +68,7 @@ static struct _intel_private {
68 phys_addr_t gma_bus_addr; 68 phys_addr_t gma_bus_addr;
69 u32 PGETBL_save; 69 u32 PGETBL_save;
70 u32 __iomem *gtt; /* I915G */ 70 u32 __iomem *gtt; /* I915G */
71 bool clear_fake_agp; /* on first access via agp, fill with scratch */
71 int num_dcache_entries; 72 int num_dcache_entries;
72 union { 73 union {
73 void __iomem *i9xx_flush_page; 74 void __iomem *i9xx_flush_page;
@@ -869,21 +870,12 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
869 870
870static int intel_fake_agp_configure(void) 871static int intel_fake_agp_configure(void)
871{ 872{
872 int i;
873
874 if (!intel_enable_gtt()) 873 if (!intel_enable_gtt())
875 return -EIO; 874 return -EIO;
876 875
876 intel_private.clear_fake_agp = true;
877 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; 877 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
878 878
879 for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
880 intel_private.driver->write_entry(intel_private.scratch_page_dma,
881 i, 0);
882 }
883 readl(intel_private.gtt+i-1); /* PCI Posting. */
884
885 global_cache_flush();
886
887 return 0; 879 return 0;
888} 880}
889 881
@@ -945,6 +937,13 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
945{ 937{
946 int ret = -EINVAL; 938 int ret = -EINVAL;
947 939
940 if (intel_private.clear_fake_agp) {
941 int start = intel_private.base.stolen_size / PAGE_SIZE;
942 int end = intel_private.base.gtt_mappable_entries;
943 intel_gtt_clear_range(start, end - start);
944 intel_private.clear_fake_agp = false;
945 }
946
948 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) 947 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
949 return i810_insert_dcache_entries(mem, pg_start, type); 948 return i810_insert_dcache_entries(mem, pg_start, type);
950 949
@@ -1361,7 +1360,7 @@ static const struct intel_gtt_driver_description {
1361 &i81x_gtt_driver}, 1360 &i81x_gtt_driver},
1362 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", 1361 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1363 &i8xx_gtt_driver}, 1362 &i8xx_gtt_driver},
1364 { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", 1363 { PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
1365 &i8xx_gtt_driver}, 1364 &i8xx_gtt_driver},
1366 { PCI_DEVICE_ID_INTEL_82854_IG, "854", 1365 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1367 &i8xx_gtt_driver}, 1366 &i8xx_gtt_driver},
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index e397df3ad98e..16402445f2b2 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -183,16 +183,16 @@ bfin_jc_circ_write(const unsigned char *buf, int count)
183} 183}
184 184
185#ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE 185#ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
186# define acquire_console_sem() 186# define console_lock()
187# define release_console_sem() 187# define console_unlock()
188#endif 188#endif
189static int 189static int
190bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count) 190bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
191{ 191{
192 int i; 192 int i;
193 acquire_console_sem(); 193 console_lock();
194 i = bfin_jc_circ_write(buf, count); 194 i = bfin_jc_circ_write(buf, count);
195 release_console_sem(); 195 console_unlock();
196 wake_up_process(bfin_jc_kthread); 196 wake_up_process(bfin_jc_kthread);
197 return i; 197 return i;
198} 198}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 2fe72f8edf44..38223e93aa98 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -970,6 +970,33 @@ out_kfree:
970} 970}
971EXPORT_SYMBOL(ipmi_create_user); 971EXPORT_SYMBOL(ipmi_create_user);
972 972
973int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
974{
975 int rv = 0;
976 ipmi_smi_t intf;
977 struct ipmi_smi_handlers *handlers;
978
979 mutex_lock(&ipmi_interfaces_mutex);
980 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
981 if (intf->intf_num == if_num)
982 goto found;
983 }
984 /* Not found, return an error */
985 rv = -EINVAL;
986 mutex_unlock(&ipmi_interfaces_mutex);
987 return rv;
988
989found:
990 handlers = intf->handlers;
991 rv = -ENOSYS;
992 if (handlers->get_smi_info)
993 rv = handlers->get_smi_info(intf->send_info, data);
994 mutex_unlock(&ipmi_interfaces_mutex);
995
996 return rv;
997}
998EXPORT_SYMBOL(ipmi_get_smi_info);
999
973static void free_user(struct kref *ref) 1000static void free_user(struct kref *ref)
974{ 1001{
975 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); 1002 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f27c04e18aaa..7855f9f45b8e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -57,6 +57,7 @@
57#include <asm/irq.h> 57#include <asm/irq.h>
58#include <linux/interrupt.h> 58#include <linux/interrupt.h>
59#include <linux/rcupdate.h> 59#include <linux/rcupdate.h>
60#include <linux/ipmi.h>
60#include <linux/ipmi_smi.h> 61#include <linux/ipmi_smi.h>
61#include <asm/io.h> 62#include <asm/io.h>
62#include "ipmi_si_sm.h" 63#include "ipmi_si_sm.h"
@@ -109,10 +110,6 @@ enum si_type {
109}; 110};
110static char *si_to_str[] = { "kcs", "smic", "bt" }; 111static char *si_to_str[] = { "kcs", "smic", "bt" };
111 112
112enum ipmi_addr_src {
113 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
114 SI_PCI, SI_DEVICETREE, SI_DEFAULT
115};
116static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", 113static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
117 "ACPI", "SMBIOS", "PCI", 114 "ACPI", "SMBIOS", "PCI",
118 "device-tree", "default" }; 115 "device-tree", "default" };
@@ -293,6 +290,7 @@ struct smi_info {
293 struct task_struct *thread; 290 struct task_struct *thread;
294 291
295 struct list_head link; 292 struct list_head link;
293 union ipmi_smi_info_union addr_info;
296}; 294};
297 295
298#define smi_inc_stat(smi, stat) \ 296#define smi_inc_stat(smi, stat) \
@@ -322,6 +320,7 @@ static int unload_when_empty = 1;
322static int add_smi(struct smi_info *smi); 320static int add_smi(struct smi_info *smi);
323static int try_smi_init(struct smi_info *smi); 321static int try_smi_init(struct smi_info *smi);
324static void cleanup_one_si(struct smi_info *to_clean); 322static void cleanup_one_si(struct smi_info *to_clean);
323static void cleanup_ipmi_si(void);
325 324
326static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 325static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
327static int register_xaction_notifier(struct notifier_block *nb) 326static int register_xaction_notifier(struct notifier_block *nb)
@@ -1188,6 +1187,18 @@ static int smi_start_processing(void *send_info,
1188 return 0; 1187 return 0;
1189} 1188}
1190 1189
1190static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1191{
1192 struct smi_info *smi = send_info;
1193
1194 data->addr_src = smi->addr_source;
1195 data->dev = smi->dev;
1196 data->addr_info = smi->addr_info;
1197 get_device(smi->dev);
1198
1199 return 0;
1200}
1201
1191static void set_maintenance_mode(void *send_info, int enable) 1202static void set_maintenance_mode(void *send_info, int enable)
1192{ 1203{
1193 struct smi_info *smi_info = send_info; 1204 struct smi_info *smi_info = send_info;
@@ -1199,6 +1210,7 @@ static void set_maintenance_mode(void *send_info, int enable)
1199static struct ipmi_smi_handlers handlers = { 1210static struct ipmi_smi_handlers handlers = {
1200 .owner = THIS_MODULE, 1211 .owner = THIS_MODULE,
1201 .start_processing = smi_start_processing, 1212 .start_processing = smi_start_processing,
1213 .get_smi_info = get_smi_info,
1202 .sender = sender, 1214 .sender = sender,
1203 .request_events = request_events, 1215 .request_events = request_events,
1204 .set_maintenance_mode = set_maintenance_mode, 1216 .set_maintenance_mode = set_maintenance_mode,
@@ -1930,7 +1942,8 @@ static void __devinit hardcode_find_bmc(void)
1930static int acpi_failure; 1942static int acpi_failure;
1931 1943
1932/* For GPE-type interrupts. */ 1944/* For GPE-type interrupts. */
1933static u32 ipmi_acpi_gpe(void *context) 1945static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
1946 u32 gpe_number, void *context)
1934{ 1947{
1935 struct smi_info *smi_info = context; 1948 struct smi_info *smi_info = context;
1936 unsigned long flags; 1949 unsigned long flags;
@@ -2158,6 +2171,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2158 printk(KERN_INFO PFX "probing via ACPI\n"); 2171 printk(KERN_INFO PFX "probing via ACPI\n");
2159 2172
2160 handle = acpi_dev->handle; 2173 handle = acpi_dev->handle;
2174 info->addr_info.acpi_info.acpi_handle = handle;
2161 2175
2162 /* _IFT tells us the interface type: KCS, BT, etc */ 2176 /* _IFT tells us the interface type: KCS, BT, etc */
2163 status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); 2177 status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
@@ -3437,16 +3451,7 @@ static int __devinit init_ipmi_si(void)
3437 mutex_lock(&smi_infos_lock); 3451 mutex_lock(&smi_infos_lock);
3438 if (unload_when_empty && list_empty(&smi_infos)) { 3452 if (unload_when_empty && list_empty(&smi_infos)) {
3439 mutex_unlock(&smi_infos_lock); 3453 mutex_unlock(&smi_infos_lock);
3440#ifdef CONFIG_PCI 3454 cleanup_ipmi_si();
3441 if (pci_registered)
3442 pci_unregister_driver(&ipmi_pci_driver);
3443#endif
3444
3445#ifdef CONFIG_PPC_OF
3446 if (of_registered)
3447 of_unregister_platform_driver(&ipmi_of_platform_driver);
3448#endif
3449 driver_unregister(&ipmi_driver.driver);
3450 printk(KERN_WARNING PFX 3455 printk(KERN_WARNING PFX
3451 "Unable to find any System Interface(s)\n"); 3456 "Unable to find any System Interface(s)\n");
3452 return -ENODEV; 3457 return -ENODEV;
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 1f46f1cd9225..faf5a2c65926 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,12 +364,14 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
364 tpm_protected_ordinal_duration[ordinal & 364 tpm_protected_ordinal_duration[ordinal &
365 TPM_PROTECTED_ORDINAL_MASK]; 365 TPM_PROTECTED_ORDINAL_MASK];
366 366
367 if (duration_idx != TPM_UNDEFINED) 367 if (duration_idx != TPM_UNDEFINED) {
368 duration = chip->vendor.duration[duration_idx]; 368 duration = chip->vendor.duration[duration_idx];
369 if (duration <= 0) 369 /* if duration is 0, it's because chip->vendor.duration wasn't */
370 /* filled yet, so we set the lowest timeout just to give enough */
371 /* time for tpm_get_timeouts() to succeed */
372 return (duration <= 0 ? HZ : duration);
373 } else
370 return 2 * 60 * HZ; 374 return 2 * 60 * HZ;
371 else
372 return duration;
373} 375}
374EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); 376EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
375 377
@@ -575,9 +577,11 @@ duration:
575 if (rc) 577 if (rc)
576 return; 578 return;
577 579
578 if (be32_to_cpu(tpm_cmd.header.out.return_code) 580 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
579 != 3 * sizeof(u32)) 581 be32_to_cpu(tpm_cmd.header.out.length)
582 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
580 return; 583 return;
584
581 duration_cap = &tpm_cmd.params.getcap_out.cap.duration; 585 duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
582 chip->vendor.duration[TPM_SHORT] = 586 chip->vendor.duration[TPM_SHORT] =
583 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); 587 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
@@ -937,6 +941,18 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
937} 941}
938EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); 942EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
939 943
944ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
945 char *buf)
946{
947 struct tpm_chip *chip = dev_get_drvdata(dev);
948
949 return sprintf(buf, "%d %d %d\n",
950 jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
951 jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
952 jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
953}
954EXPORT_SYMBOL_GPL(tpm_show_timeouts);
955
940ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, 956ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
941 const char *buf, size_t count) 957 const char *buf, size_t count)
942{ 958{
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 72ddb031b69a..d84ff772c26f 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,6 +56,8 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
56 char *); 56 char *);
57extern ssize_t tpm_show_temp_deactivated(struct device *, 57extern ssize_t tpm_show_temp_deactivated(struct device *,
58 struct device_attribute *attr, char *); 58 struct device_attribute *attr, char *);
59extern ssize_t tpm_show_timeouts(struct device *,
60 struct device_attribute *attr, char *);
59 61
60struct tpm_chip; 62struct tpm_chip;
61 63
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c17a305ecb28..0d1d38e5f266 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -376,6 +376,7 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
376 NULL); 376 NULL);
377static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); 377static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
378static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 378static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
379static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
379 380
380static struct attribute *tis_attrs[] = { 381static struct attribute *tis_attrs[] = {
381 &dev_attr_pubek.attr, 382 &dev_attr_pubek.attr,
@@ -385,7 +386,8 @@ static struct attribute *tis_attrs[] = {
385 &dev_attr_owned.attr, 386 &dev_attr_owned.attr,
386 &dev_attr_temp_deactivated.attr, 387 &dev_attr_temp_deactivated.attr,
387 &dev_attr_caps.attr, 388 &dev_attr_caps.attr,
388 &dev_attr_cancel.attr, NULL, 389 &dev_attr_cancel.attr,
390 &dev_attr_timeouts.attr, NULL,
389}; 391};
390 392
391static struct attribute_group tis_attr_grp = { 393static struct attribute_group tis_attr_grp = {
@@ -493,9 +495,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
493 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 495 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
494 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 496 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
495 497
496 if (is_itpm(to_pnp_dev(dev)))
497 itpm = 1;
498
499 if (itpm) 498 if (itpm)
500 dev_info(dev, "Intel iTPM workaround enabled\n"); 499 dev_info(dev, "Intel iTPM workaround enabled\n");
501 500
@@ -637,6 +636,9 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
637 else 636 else
638 interrupts = 0; 637 interrupts = 0;
639 638
639 if (is_itpm(pnp_dev))
640 itpm = 1;
641
640 return tpm_tis_init(&pnp_dev->dev, start, len, irq); 642 return tpm_tis_init(&pnp_dev->dev, start, len, irq);
641} 643}
642 644
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 896a2ced1d27..490393186338 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation 2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
3 * Copyright (C) 2009, 2010 Red Hat, Inc. 3 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
4 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -31,7 +32,7 @@
31#include <linux/virtio_console.h> 32#include <linux/virtio_console.h>
32#include <linux/wait.h> 33#include <linux/wait.h>
33#include <linux/workqueue.h> 34#include <linux/workqueue.h>
34#include "hvc_console.h" 35#include "../tty/hvc/hvc_console.h"
35 36
36/* 37/*
37 * This is a global struct for storing common data for all the devices 38 * This is a global struct for storing common data for all the devices
@@ -1462,6 +1463,17 @@ static void control_work_handler(struct work_struct *work)
1462 spin_unlock(&portdev->cvq_lock); 1463 spin_unlock(&portdev->cvq_lock);
1463} 1464}
1464 1465
1466static void out_intr(struct virtqueue *vq)
1467{
1468 struct port *port;
1469
1470 port = find_port_by_vq(vq->vdev->priv, vq);
1471 if (!port)
1472 return;
1473
1474 wake_up_interruptible(&port->waitqueue);
1475}
1476
1465static void in_intr(struct virtqueue *vq) 1477static void in_intr(struct virtqueue *vq)
1466{ 1478{
1467 struct port *port; 1479 struct port *port;
@@ -1566,7 +1578,7 @@ static int init_vqs(struct ports_device *portdev)
1566 */ 1578 */
1567 j = 0; 1579 j = 0;
1568 io_callbacks[j] = in_intr; 1580 io_callbacks[j] = in_intr;
1569 io_callbacks[j + 1] = NULL; 1581 io_callbacks[j + 1] = out_intr;
1570 io_names[j] = "input"; 1582 io_names[j] = "input";
1571 io_names[j + 1] = "output"; 1583 io_names[j + 1] = "output";
1572 j += 2; 1584 j += 2;
@@ -1580,7 +1592,7 @@ static int init_vqs(struct ports_device *portdev)
1580 for (i = 1; i < nr_ports; i++) { 1592 for (i = 1; i < nr_ports; i++) {
1581 j += 2; 1593 j += 2;
1582 io_callbacks[j] = in_intr; 1594 io_callbacks[j] = in_intr;
1583 io_callbacks[j + 1] = NULL; 1595 io_callbacks[j + 1] = out_intr;
1584 io_names[j] = "input"; 1596 io_names[j] = "input";
1585 io_names[j + 1] = "output"; 1597 io_names[j + 1] = "output";
1586 } 1598 }
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index cfb0f5278415..effe7974aa9a 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -202,17 +202,21 @@ static int __init init_acpi_pm_clocksource(void)
202 printk(KERN_INFO "PM-Timer had inconsistent results:" 202 printk(KERN_INFO "PM-Timer had inconsistent results:"
203 " 0x%#llx, 0x%#llx - aborting.\n", 203 " 0x%#llx, 0x%#llx - aborting.\n",
204 value1, value2); 204 value1, value2);
205 pmtmr_ioport = 0;
205 return -EINVAL; 206 return -EINVAL;
206 } 207 }
207 if (i == ACPI_PM_READ_CHECKS) { 208 if (i == ACPI_PM_READ_CHECKS) {
208 printk(KERN_INFO "PM-Timer failed consistency check " 209 printk(KERN_INFO "PM-Timer failed consistency check "
209 " (0x%#llx) - aborting.\n", value1); 210 " (0x%#llx) - aborting.\n", value1);
211 pmtmr_ioport = 0;
210 return -ENODEV; 212 return -ENODEV;
211 } 213 }
212 } 214 }
213 215
214 if (verify_pmtmr_rate() != 0) 216 if (verify_pmtmr_rate() != 0){
217 pmtmr_ioport = 0;
215 return -ENODEV; 218 return -ENODEV;
219 }
216 220
217 return clocksource_register_hz(&clocksource_acpi_pm, 221 return clocksource_register_hz(&clocksource_acpi_pm,
218 PMTMR_TICKS_PER_SEC); 222 PMTMR_TICKS_PER_SEC);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 01b886e68822..79c47e88d5d1 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -196,9 +196,9 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
196 clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1; 196 clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
197 clkevt.clkevt.cpumask = cpumask_of(0); 197 clkevt.clkevt.cpumask = cpumask_of(0);
198 198
199 setup_irq(irq, &tc_irqaction);
200
201 clockevents_register_device(&clkevt.clkevt); 199 clockevents_register_device(&clkevt.clkevt);
200
201 setup_irq(irq, &tc_irqaction);
202} 202}
203 203
204#else /* !CONFIG_GENERIC_CLOCKEVENTS */ 204#else /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a8c8d9c19d74..ca8ee8093d6c 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -71,7 +71,7 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
71 71
72config CPU_FREQ_DEFAULT_GOV_POWERSAVE 72config CPU_FREQ_DEFAULT_GOV_POWERSAVE
73 bool "powersave" 73 bool "powersave"
74 depends on EMBEDDED 74 depends on EXPERT
75 select CPU_FREQ_GOV_POWERSAVE 75 select CPU_FREQ_GOV_POWERSAVE
76 help 76 help
77 Use the CPUFreq governor 'powersave' as default. This sets 77 Use the CPUFreq governor 'powersave' as default. This sets
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 386888f10df0..bf5092455a8f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -96,7 +96,15 @@ static void cpuidle_idle_call(void)
96 96
97 /* enter the state and update stats */ 97 /* enter the state and update stats */
98 dev->last_state = target_state; 98 dev->last_state = target_state;
99
100 trace_power_start(POWER_CSTATE, next_state, dev->cpu);
101 trace_cpu_idle(next_state, dev->cpu);
102
99 dev->last_residency = target_state->enter(dev, target_state); 103 dev->last_residency = target_state->enter(dev, target_state);
104
105 trace_power_end(dev->cpu);
106 trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
107
100 if (dev->last_state) 108 if (dev->last_state)
101 target_state = dev->last_state; 109 target_state = dev->last_state;
102 110
@@ -106,8 +114,6 @@ static void cpuidle_idle_call(void)
106 /* give the governor an opportunity to reflect on the outcome */ 114 /* give the governor an opportunity to reflect on the outcome */
107 if (cpuidle_curr_governor->reflect) 115 if (cpuidle_curr_governor->reflect)
108 cpuidle_curr_governor->reflect(dev); 116 cpuidle_curr_governor->reflect(dev);
109 trace_power_end(smp_processor_id());
110 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
111} 117}
112 118
113/** 119/**
@@ -155,6 +161,45 @@ void cpuidle_resume_and_unlock(void)
155 161
156EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); 162EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
157 163
164#ifdef CONFIG_ARCH_HAS_CPU_RELAX
165static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
166{
167 ktime_t t1, t2;
168 s64 diff;
169 int ret;
170
171 t1 = ktime_get();
172 local_irq_enable();
173 while (!need_resched())
174 cpu_relax();
175
176 t2 = ktime_get();
177 diff = ktime_to_us(ktime_sub(t2, t1));
178 if (diff > INT_MAX)
179 diff = INT_MAX;
180
181 ret = (int) diff;
182 return ret;
183}
184
185static void poll_idle_init(struct cpuidle_device *dev)
186{
187 struct cpuidle_state *state = &dev->states[0];
188
189 cpuidle_set_statedata(state, NULL);
190
191 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
192 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
193 state->exit_latency = 0;
194 state->target_residency = 0;
195 state->power_usage = -1;
196 state->flags = 0;
197 state->enter = poll_idle;
198}
199#else
200static void poll_idle_init(struct cpuidle_device *dev) {}
201#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
202
158/** 203/**
159 * cpuidle_enable_device - enables idle PM for a CPU 204 * cpuidle_enable_device - enables idle PM for a CPU
160 * @dev: the CPU 205 * @dev: the CPU
@@ -179,6 +224,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
179 return ret; 224 return ret;
180 } 225 }
181 226
227 poll_idle_init(dev);
228
182 if ((ret = cpuidle_add_state_sysfs(dev))) 229 if ((ret = cpuidle_add_state_sysfs(dev)))
183 return ret; 230 return ret;
184 231
@@ -233,45 +280,6 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
233 280
234EXPORT_SYMBOL_GPL(cpuidle_disable_device); 281EXPORT_SYMBOL_GPL(cpuidle_disable_device);
235 282
236#ifdef CONFIG_ARCH_HAS_CPU_RELAX
237static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
238{
239 ktime_t t1, t2;
240 s64 diff;
241 int ret;
242
243 t1 = ktime_get();
244 local_irq_enable();
245 while (!need_resched())
246 cpu_relax();
247
248 t2 = ktime_get();
249 diff = ktime_to_us(ktime_sub(t2, t1));
250 if (diff > INT_MAX)
251 diff = INT_MAX;
252
253 ret = (int) diff;
254 return ret;
255}
256
257static void poll_idle_init(struct cpuidle_device *dev)
258{
259 struct cpuidle_state *state = &dev->states[0];
260
261 cpuidle_set_statedata(state, NULL);
262
263 snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
264 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
265 state->exit_latency = 0;
266 state->target_residency = 0;
267 state->power_usage = -1;
268 state->flags = CPUIDLE_FLAG_POLL;
269 state->enter = poll_idle;
270}
271#else
272static void poll_idle_init(struct cpuidle_device *dev) {}
273#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
274
275/** 283/**
276 * __cpuidle_register_device - internal register function called before register 284 * __cpuidle_register_device - internal register function called before register
277 * and enable routines 285 * and enable routines
@@ -292,8 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
292 300
293 init_completion(&dev->kobj_unregister); 301 init_completion(&dev->kobj_unregister);
294 302
295 poll_idle_init(dev);
296
297 /* 303 /*
298 * cpuidle driver should set the dev->power_specified bit 304 * cpuidle driver should set the dev->power_specified bit
299 * before registering the device if the driver provides 305 * before registering the device if the driver provides
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef138731c0ea..1c28816152fa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,11 +200,16 @@ config PL330_DMA
200 platform_data for a dma-pl330 device. 200 platform_data for a dma-pl330 device.
201 201
202config PCH_DMA 202config PCH_DMA
203 tristate "Topcliff (Intel EG20T) PCH DMA support" 203 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
204 depends on PCI && X86 204 depends on PCI && X86
205 select DMA_ENGINE 205 select DMA_ENGINE
206 help 206 help
207 Enable support for the Topcliff (Intel EG20T) PCH DMA engine. 207 Enable support for Intel EG20T PCH DMA engine.
208
209 This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
210 Output Hub) which is for IVI(In-Vehicle Infotainment) use.
211 ML7213 is companion chip for Intel Atom E6xx series.
212 ML7213 is completely compatible for Intel EG20T PCH.
208 213
209config IMX_SDMA 214config IMX_SDMA
210 tristate "i.MX SDMA support" 215 tristate "i.MX SDMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9ac3a2..297f48b0cba9 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
19 * this program; if not, write to the Free Software Foundation, Inc., 59 19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 * 21 *
22 * The full GNU General Public License is iin this distribution in the 22 * The full GNU General Public License is in this distribution in the file
23 * file called COPYING. 23 * called COPYING.
24 * 24 *
25 * Documentation: ARM DDI 0196G == PL080 25 * Documentation: ARM DDI 0196G == PL080
26 * Documentation: ARM DDI 0218E == PL081 26 * Documentation: ARM DDI 0218E == PL081
27 * 27 *
28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to 28 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
29 * any channel. 29 * channel.
30 * 30 *
31 * The PL080 has 8 channels available for simultaneous use, and the PL081 31 * The PL080 has 8 channels available for simultaneous use, and the PL081
32 * has only two channels. So on these DMA controllers the number of channels 32 * has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
53 * 53 *
54 * ASSUMES default (little) endianness for DMA transfers 54 * ASSUMES default (little) endianness for DMA transfers
55 * 55 *
56 * Only DMAC flow control is implemented 56 * The PL08x has two flow control settings:
57 * - DMAC flow control: the transfer size defines the number of transfers
58 * which occur for the current LLI entry, and the DMAC raises TC at the
59 * end of every LLI entry. Observed behaviour shows the DMAC listening
60 * to both the BREQ and SREQ signals (contrary to documented),
61 * transferring data if either is active. The LBREQ and LSREQ signals
62 * are ignored.
63 *
64 * - Peripheral flow control: the transfer size is ignored (and should be
65 * zero). The data is transferred from the current LLI entry, until
66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC
67 * will then move to the next LLI entry.
68 *
69 * Only the former works sanely with scatter lists, so we only implement
70 * the DMAC flow control method. However, peripherals which use the LBREQ
71 * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
72 * these hardware restrictions prevents them from using scatter DMA.
57 * 73 *
58 * Global TODO: 74 * Global TODO:
59 * - Break out common code from arch/arm/mach-s3c64xx and share 75 * - Break out common code from arch/arm/mach-s3c64xx and share
@@ -61,50 +77,39 @@
61#include <linux/device.h> 77#include <linux/device.h>
62#include <linux/init.h> 78#include <linux/init.h>
63#include <linux/module.h> 79#include <linux/module.h>
64#include <linux/pci.h>
65#include <linux/interrupt.h> 80#include <linux/interrupt.h>
66#include <linux/slab.h> 81#include <linux/slab.h>
67#include <linux/dmapool.h> 82#include <linux/dmapool.h>
68#include <linux/amba/bus.h>
69#include <linux/dmaengine.h> 83#include <linux/dmaengine.h>
84#include <linux/amba/bus.h>
70#include <linux/amba/pl08x.h> 85#include <linux/amba/pl08x.h>
71#include <linux/debugfs.h> 86#include <linux/debugfs.h>
72#include <linux/seq_file.h> 87#include <linux/seq_file.h>
73 88
74#include <asm/hardware/pl080.h> 89#include <asm/hardware/pl080.h>
75#include <asm/dma.h>
76#include <asm/mach/dma.h>
77#include <asm/atomic.h>
78#include <asm/processor.h>
79#include <asm/cacheflush.h>
80 90
81#define DRIVER_NAME "pl08xdmac" 91#define DRIVER_NAME "pl08xdmac"
82 92
83/** 93/**
84 * struct vendor_data - vendor-specific config parameters 94 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
85 * for PL08x derivates
86 * @name: the name of this specific variant
87 * @channels: the number of channels available in this variant 95 * @channels: the number of channels available in this variant
88 * @dualmaster: whether this version supports dual AHB masters 96 * @dualmaster: whether this version supports dual AHB masters or not.
89 * or not.
90 */ 97 */
91struct vendor_data { 98struct vendor_data {
92 char *name;
93 u8 channels; 99 u8 channels;
94 bool dualmaster; 100 bool dualmaster;
95}; 101};
96 102
97/* 103/*
98 * PL08X private data structures 104 * PL08X private data structures
99 * An LLI struct - see pl08x TRM 105 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
100 * Note that next uses bit[0] as a bus bit, 106 * start & end do not - their bus bit info is in cctl. Also note that these
101 * start & end do not - their bus bit info 107 * are fixed 32-bit quantities.
102 * is in cctl
103 */ 108 */
104struct lli { 109struct pl08x_lli {
105 dma_addr_t src; 110 u32 src;
106 dma_addr_t dst; 111 u32 dst;
107 dma_addr_t next; 112 u32 lli;
108 u32 cctl; 113 u32 cctl;
109}; 114};
110 115
@@ -119,6 +124,8 @@ struct lli {
119 * @phy_chans: array of data for the physical channels 124 * @phy_chans: array of data for the physical channels
120 * @pool: a pool for the LLI descriptors 125 * @pool: a pool for the LLI descriptors
121 * @pool_ctr: counter of LLIs in the pool 126 * @pool_ctr: counter of LLIs in the pool
127 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
128 * @mem_buses: set to indicate memory transfers on AHB2.
122 * @lock: a spinlock for this struct 129 * @lock: a spinlock for this struct
123 */ 130 */
124struct pl08x_driver_data { 131struct pl08x_driver_data {
@@ -126,11 +133,13 @@ struct pl08x_driver_data {
126 struct dma_device memcpy; 133 struct dma_device memcpy;
127 void __iomem *base; 134 void __iomem *base;
128 struct amba_device *adev; 135 struct amba_device *adev;
129 struct vendor_data *vd; 136 const struct vendor_data *vd;
130 struct pl08x_platform_data *pd; 137 struct pl08x_platform_data *pd;
131 struct pl08x_phy_chan *phy_chans; 138 struct pl08x_phy_chan *phy_chans;
132 struct dma_pool *pool; 139 struct dma_pool *pool;
133 int pool_ctr; 140 int pool_ctr;
141 u8 lli_buses;
142 u8 mem_buses;
134 spinlock_t lock; 143 spinlock_t lock;
135}; 144};
136 145
@@ -152,9 +161,9 @@ struct pl08x_driver_data {
152/* Size (bytes) of each LLI buffer allocated for one transfer */ 161/* Size (bytes) of each LLI buffer allocated for one transfer */
153# define PL08X_LLI_TSFR_SIZE 0x2000 162# define PL08X_LLI_TSFR_SIZE 0x2000
154 163
155/* Maximimum times we call dma_pool_alloc on this pool without freeing */ 164/* Maximum times we call dma_pool_alloc on this pool without freeing */
156#define PL08X_MAX_ALLOCS 0x40 165#define PL08X_MAX_ALLOCS 0x40
157#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli)) 166#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
158#define PL08X_ALIGN 8 167#define PL08X_ALIGN 8
159 168
160static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 169static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -162,6 +171,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
162 return container_of(chan, struct pl08x_dma_chan, chan); 171 return container_of(chan, struct pl08x_dma_chan, chan);
163} 172}
164 173
174static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
175{
176 return container_of(tx, struct pl08x_txd, tx);
177}
178
165/* 179/*
166 * Physical channel handling 180 * Physical channel handling
167 */ 181 */
@@ -177,88 +191,47 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
177 191
178/* 192/*
179 * Set the initial DMA register values i.e. those for the first LLI 193 * Set the initial DMA register values i.e. those for the first LLI
180 * The next lli pointer and the configuration interrupt bit have 194 * The next LLI pointer and the configuration interrupt bit have
181 * been set when the LLIs were constructed 195 * been set when the LLIs were constructed. Poke them into the hardware
196 * and start the transfer.
182 */ 197 */
183static void pl08x_set_cregs(struct pl08x_driver_data *pl08x, 198static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
184 struct pl08x_phy_chan *ch) 199 struct pl08x_txd *txd)
185{
186 /* Wait for channel inactive */
187 while (pl08x_phy_channel_busy(ch))
188 ;
189
190 dev_vdbg(&pl08x->adev->dev,
191 "WRITE channel %d: csrc=%08x, cdst=%08x, "
192 "cctl=%08x, clli=%08x, ccfg=%08x\n",
193 ch->id,
194 ch->csrc,
195 ch->cdst,
196 ch->cctl,
197 ch->clli,
198 ch->ccfg);
199
200 writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
201 writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
202 writel(ch->clli, ch->base + PL080_CH_LLI);
203 writel(ch->cctl, ch->base + PL080_CH_CONTROL);
204 writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
205}
206
207static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
208{ 200{
209 struct pl08x_channel_data *cd = plchan->cd; 201 struct pl08x_driver_data *pl08x = plchan->host;
210 struct pl08x_phy_chan *phychan = plchan->phychan; 202 struct pl08x_phy_chan *phychan = plchan->phychan;
211 struct pl08x_txd *txd = plchan->at; 203 struct pl08x_lli *lli = &txd->llis_va[0];
212
213 /* Copy the basic control register calculated at transfer config */
214 phychan->csrc = txd->csrc;
215 phychan->cdst = txd->cdst;
216 phychan->clli = txd->clli;
217 phychan->cctl = txd->cctl;
218
219 /* Assign the signal to the proper control registers */
220 phychan->ccfg = cd->ccfg;
221 phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
222 phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
223 /* If it wasn't set from AMBA, ignore it */
224 if (txd->direction == DMA_TO_DEVICE)
225 /* Select signal as destination */
226 phychan->ccfg |=
227 (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
228 else if (txd->direction == DMA_FROM_DEVICE)
229 /* Select signal as source */
230 phychan->ccfg |=
231 (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
232 /* Always enable error interrupts */
233 phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
234 /* Always enable terminal interrupts */
235 phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
236}
237
238/*
239 * Enable the DMA channel
240 * Assumes all other configuration bits have been set
241 * as desired before this code is called
242 */
243static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
244 struct pl08x_phy_chan *ch)
245{
246 u32 val; 204 u32 val;
247 205
248 /* 206 plchan->at = txd;
249 * Do not access config register until channel shows as disabled
250 */
251 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
252 ;
253 207
254 /* 208 /* Wait for channel inactive */
255 * Do not access config register until channel shows as inactive 209 while (pl08x_phy_channel_busy(phychan))
256 */ 210 cpu_relax();
257 val = readl(ch->base + PL080_CH_CONFIG); 211
212 dev_vdbg(&pl08x->adev->dev,
213 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
214 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
215 phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
216 txd->ccfg);
217
218 writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
219 writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
220 writel(lli->lli, phychan->base + PL080_CH_LLI);
221 writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
222 writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
223
224 /* Enable the DMA channel */
225 /* Do not access config register until channel shows as disabled */
226 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
227 cpu_relax();
228
229 /* Do not access config register until channel shows as inactive */
230 val = readl(phychan->base + PL080_CH_CONFIG);
258 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 231 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
259 val = readl(ch->base + PL080_CH_CONFIG); 232 val = readl(phychan->base + PL080_CH_CONFIG);
260 233
261 writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG); 234 writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
262} 235}
263 236
264/* 237/*
@@ -266,10 +239,8 @@ static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
266 * 239 *
267 * Disabling individual channels could lose data. 240 * Disabling individual channels could lose data.
268 * 241 *
269 * Disable the peripheral DMA after disabling the DMAC 242 * Disable the peripheral DMA after disabling the DMAC in order to allow
270 * in order to allow the DMAC FIFO to drain, and 243 * the DMAC FIFO to drain, and hence allow the channel to show inactive
271 * hence allow the channel to show inactive
272 *
273 */ 244 */
274static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 245static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
275{ 246{
@@ -282,7 +253,7 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
282 253
283 /* Wait for channel inactive */ 254 /* Wait for channel inactive */
284 while (pl08x_phy_channel_busy(ch)) 255 while (pl08x_phy_channel_busy(ch))
285 ; 256 cpu_relax();
286} 257}
287 258
288static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 259static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -333,54 +304,56 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
333static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 304static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
334{ 305{
335 struct pl08x_phy_chan *ch; 306 struct pl08x_phy_chan *ch;
336 struct pl08x_txd *txdi = NULL;
337 struct pl08x_txd *txd; 307 struct pl08x_txd *txd;
338 unsigned long flags; 308 unsigned long flags;
339 u32 bytes = 0; 309 size_t bytes = 0;
340 310
341 spin_lock_irqsave(&plchan->lock, flags); 311 spin_lock_irqsave(&plchan->lock, flags);
342
343 ch = plchan->phychan; 312 ch = plchan->phychan;
344 txd = plchan->at; 313 txd = plchan->at;
345 314
346 /* 315 /*
347 * Next follow the LLIs to get the number of pending bytes in the 316 * Follow the LLIs to get the number of remaining
348 * currently active transaction. 317 * bytes in the currently active transaction.
349 */ 318 */
350 if (ch && txd) { 319 if (ch && txd) {
351 struct lli *llis_va = txd->llis_va; 320 u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
352 struct lli *llis_bus = (struct lli *) txd->llis_bus;
353 u32 clli = readl(ch->base + PL080_CH_LLI);
354 321
355 /* First get the bytes in the current active LLI */ 322 /* First get the remaining bytes in the active transfer */
356 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 323 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
357 324
358 if (clli) { 325 if (clli) {
359 int i = 0; 326 struct pl08x_lli *llis_va = txd->llis_va;
327 dma_addr_t llis_bus = txd->llis_bus;
328 int index;
329
330 BUG_ON(clli < llis_bus || clli >= llis_bus +
331 sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
332
333 /*
334 * Locate the next LLI - as this is an array,
335 * it's simple maths to find.
336 */
337 index = (clli - llis_bus) / sizeof(struct pl08x_lli);
360 338
361 /* Forward to the LLI pointed to by clli */ 339 for (; index < MAX_NUM_TSFR_LLIS; index++) {
362 while ((clli != (u32) &(llis_bus[i])) && 340 bytes += get_bytes_in_cctl(llis_va[index].cctl);
363 (i < MAX_NUM_TSFR_LLIS))
364 i++;
365 341
366 while (clli) {
367 bytes += get_bytes_in_cctl(llis_va[i].cctl);
368 /* 342 /*
369 * A clli of 0x00000000 will terminate the 343 * A LLI pointer of 0 terminates the LLI list
370 * LLI list
371 */ 344 */
372 clli = llis_va[i].next; 345 if (!llis_va[index].lli)
373 i++; 346 break;
374 } 347 }
375 } 348 }
376 } 349 }
377 350
378 /* Sum up all queued transactions */ 351 /* Sum up all queued transactions */
379 if (!list_empty(&plchan->desc_list)) { 352 if (!list_empty(&plchan->pend_list)) {
380 list_for_each_entry(txdi, &plchan->desc_list, node) { 353 struct pl08x_txd *txdi;
354 list_for_each_entry(txdi, &plchan->pend_list, node) {
381 bytes += txdi->len; 355 bytes += txdi->len;
382 } 356 }
383
384 } 357 }
385 358
386 spin_unlock_irqrestore(&plchan->lock, flags); 359 spin_unlock_irqrestore(&plchan->lock, flags);
@@ -390,6 +363,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
390 363
391/* 364/*
392 * Allocate a physical channel for a virtual channel 365 * Allocate a physical channel for a virtual channel
366 *
367 * Try to locate a physical channel to be used for this transfer. If all
368 * are taken return NULL and the requester will have to cope by using
369 * some fallback PIO mode or retrying later.
393 */ 370 */
394static struct pl08x_phy_chan * 371static struct pl08x_phy_chan *
395pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 372pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +376,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
399 unsigned long flags; 376 unsigned long flags;
400 int i; 377 int i;
401 378
402 /*
403 * Try to locate a physical channel to be used for
404 * this transfer. If all are taken return NULL and
405 * the requester will have to cope by using some fallback
406 * PIO mode or retrying later.
407 */
408 for (i = 0; i < pl08x->vd->channels; i++) { 379 for (i = 0; i < pl08x->vd->channels; i++) {
409 ch = &pl08x->phy_chans[i]; 380 ch = &pl08x->phy_chans[i];
410 381
@@ -465,11 +436,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
465} 436}
466 437
467static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 438static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
468 u32 tsize) 439 size_t tsize)
469{ 440{
470 u32 retbits = cctl; 441 u32 retbits = cctl;
471 442
472 /* Remove all src, dst and transfersize bits */ 443 /* Remove all src, dst and transfer size bits */
473 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 444 retbits &= ~PL080_CONTROL_DWIDTH_MASK;
474 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 445 retbits &= ~PL080_CONTROL_SWIDTH_MASK;
475 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 446 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +480,87 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
509 return retbits; 480 return retbits;
510} 481}
511 482
483struct pl08x_lli_build_data {
484 struct pl08x_txd *txd;
485 struct pl08x_driver_data *pl08x;
486 struct pl08x_bus_data srcbus;
487 struct pl08x_bus_data dstbus;
488 size_t remainder;
489};
490
512/* 491/*
513 * Autoselect a master bus to use for the transfer 492 * Autoselect a master bus to use for the transfer this prefers the
514 * this prefers the destination bus if both available 493 * destination bus if both available if fixed address on one bus the
515 * if fixed address on one bus the other will be chosen 494 * other will be chosen
516 */ 495 */
517void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus, 496static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
518 struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus, 497 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
519 struct pl08x_bus_data **sbus, u32 cctl)
520{ 498{
521 if (!(cctl & PL080_CONTROL_DST_INCR)) { 499 if (!(cctl & PL080_CONTROL_DST_INCR)) {
522 *mbus = src_bus; 500 *mbus = &bd->srcbus;
523 *sbus = dst_bus; 501 *sbus = &bd->dstbus;
524 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 502 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
525 *mbus = dst_bus; 503 *mbus = &bd->dstbus;
526 *sbus = src_bus; 504 *sbus = &bd->srcbus;
527 } else { 505 } else {
528 if (dst_bus->buswidth == 4) { 506 if (bd->dstbus.buswidth == 4) {
529 *mbus = dst_bus; 507 *mbus = &bd->dstbus;
530 *sbus = src_bus; 508 *sbus = &bd->srcbus;
531 } else if (src_bus->buswidth == 4) { 509 } else if (bd->srcbus.buswidth == 4) {
532 *mbus = src_bus; 510 *mbus = &bd->srcbus;
533 *sbus = dst_bus; 511 *sbus = &bd->dstbus;
534 } else if (dst_bus->buswidth == 2) { 512 } else if (bd->dstbus.buswidth == 2) {
535 *mbus = dst_bus; 513 *mbus = &bd->dstbus;
536 *sbus = src_bus; 514 *sbus = &bd->srcbus;
537 } else if (src_bus->buswidth == 2) { 515 } else if (bd->srcbus.buswidth == 2) {
538 *mbus = src_bus; 516 *mbus = &bd->srcbus;
539 *sbus = dst_bus; 517 *sbus = &bd->dstbus;
540 } else { 518 } else {
541 /* src_bus->buswidth == 1 */ 519 /* bd->srcbus.buswidth == 1 */
542 *mbus = dst_bus; 520 *mbus = &bd->dstbus;
543 *sbus = src_bus; 521 *sbus = &bd->srcbus;
544 } 522 }
545 } 523 }
546} 524}
547 525
548/* 526/*
549 * Fills in one LLI for a certain transfer descriptor 527 * Fills in one LLI for a certain transfer descriptor and advance the counter
550 * and advance the counter
551 */ 528 */
552int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 529static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
553 struct pl08x_txd *txd, int num_llis, int len, 530 int num_llis, int len, u32 cctl)
554 u32 cctl, u32 *remainder)
555{ 531{
556 struct lli *llis_va = txd->llis_va; 532 struct pl08x_lli *llis_va = bd->txd->llis_va;
557 struct lli *llis_bus = (struct lli *) txd->llis_bus; 533 dma_addr_t llis_bus = bd->txd->llis_bus;
558 534
559 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 535 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
560 536
561 llis_va[num_llis].cctl = cctl; 537 llis_va[num_llis].cctl = cctl;
562 llis_va[num_llis].src = txd->srcbus.addr; 538 llis_va[num_llis].src = bd->srcbus.addr;
563 llis_va[num_llis].dst = txd->dstbus.addr; 539 llis_va[num_llis].dst = bd->dstbus.addr;
564 540 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
565 /* 541 if (bd->pl08x->lli_buses & PL08X_AHB2)
566 * On versions with dual masters, you can optionally AND on 542 llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
567 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
568 * in new LLIs with that controller, but we always try to
569 * choose AHB1 to point into memory. The idea is to have AHB2
570 * fixed on the peripheral and AHB1 messing around in the
571 * memory. So we don't manipulate this bit currently.
572 */
573
574 llis_va[num_llis].next =
575 (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
576 543
577 if (cctl & PL080_CONTROL_SRC_INCR) 544 if (cctl & PL080_CONTROL_SRC_INCR)
578 txd->srcbus.addr += len; 545 bd->srcbus.addr += len;
579 if (cctl & PL080_CONTROL_DST_INCR) 546 if (cctl & PL080_CONTROL_DST_INCR)
580 txd->dstbus.addr += len; 547 bd->dstbus.addr += len;
581 548
582 *remainder -= len; 549 BUG_ON(bd->remainder < len);
583 550
584 return num_llis + 1; 551 bd->remainder -= len;
585} 552}
586 553
587/* 554/*
588 * Return number of bytes to fill to boundary, or len 555 * Return number of bytes to fill to boundary, or len.
556 * This calculation works for any value of addr.
589 */ 557 */
590static inline u32 pl08x_pre_boundary(u32 addr, u32 len) 558static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
591{ 559{
592 u32 boundary; 560 size_t boundary_len = PL08X_BOUNDARY_SIZE -
593 561 (addr & (PL08X_BOUNDARY_SIZE - 1));
594 boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
595 << PL08X_BOUNDARY_SHIFT;
596 562
597 if (boundary < addr + len) 563 return min(boundary_len, len);
598 return boundary - addr;
599 else
600 return len;
601} 564}
602 565
603/* 566/*
@@ -608,20 +571,13 @@ static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
608static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 571static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
609 struct pl08x_txd *txd) 572 struct pl08x_txd *txd)
610{ 573{
611 struct pl08x_channel_data *cd = txd->cd;
612 struct pl08x_bus_data *mbus, *sbus; 574 struct pl08x_bus_data *mbus, *sbus;
613 u32 remainder; 575 struct pl08x_lli_build_data bd;
614 int num_llis = 0; 576 int num_llis = 0;
615 u32 cctl; 577 u32 cctl;
616 int max_bytes_per_lli; 578 size_t max_bytes_per_lli;
617 int total_bytes = 0; 579 size_t total_bytes = 0;
618 struct lli *llis_va; 580 struct pl08x_lli *llis_va;
619 struct lli *llis_bus;
620
621 if (!txd) {
622 dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
623 return 0;
624 }
625 581
626 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 582 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
627 &txd->llis_bus); 583 &txd->llis_bus);
@@ -632,121 +588,79 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
632 588
633 pl08x->pool_ctr++; 589 pl08x->pool_ctr++;
634 590
635 /* 591 /* Get the default CCTL */
636 * Initialize bus values for this transfer 592 cctl = txd->cctl;
637 * from the passed optimal values
638 */
639 if (!cd) {
640 dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
641 return 0;
642 }
643 593
644 /* Get the default CCTL from the platform data */ 594 bd.txd = txd;
645 cctl = cd->cctl; 595 bd.pl08x = pl08x;
646 596 bd.srcbus.addr = txd->src_addr;
647 /* 597 bd.dstbus.addr = txd->dst_addr;
648 * On the PL080 we have two bus masters and we
649 * should select one for source and one for
650 * destination. We try to use AHB2 for the
651 * bus which does not increment (typically the
652 * peripheral) else we just choose something.
653 */
654 cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
655 if (pl08x->vd->dualmaster) {
656 if (cctl & PL080_CONTROL_SRC_INCR)
657 /* Source increments, use AHB2 for destination */
658 cctl |= PL080_CONTROL_DST_AHB2;
659 else if (cctl & PL080_CONTROL_DST_INCR)
660 /* Destination increments, use AHB2 for source */
661 cctl |= PL080_CONTROL_SRC_AHB2;
662 else
663 /* Just pick something, source AHB1 dest AHB2 */
664 cctl |= PL080_CONTROL_DST_AHB2;
665 }
666 598
667 /* Find maximum width of the source bus */ 599 /* Find maximum width of the source bus */
668 txd->srcbus.maxwidth = 600 bd.srcbus.maxwidth =
669 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 601 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
670 PL080_CONTROL_SWIDTH_SHIFT); 602 PL080_CONTROL_SWIDTH_SHIFT);
671 603
672 /* Find maximum width of the destination bus */ 604 /* Find maximum width of the destination bus */
673 txd->dstbus.maxwidth = 605 bd.dstbus.maxwidth =
674 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 606 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
675 PL080_CONTROL_DWIDTH_SHIFT); 607 PL080_CONTROL_DWIDTH_SHIFT);
676 608
677 /* Set up the bus widths to the maximum */ 609 /* Set up the bus widths to the maximum */
678 txd->srcbus.buswidth = txd->srcbus.maxwidth; 610 bd.srcbus.buswidth = bd.srcbus.maxwidth;
679 txd->dstbus.buswidth = txd->dstbus.maxwidth; 611 bd.dstbus.buswidth = bd.dstbus.maxwidth;
680 dev_vdbg(&pl08x->adev->dev, 612 dev_vdbg(&pl08x->adev->dev,
681 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", 613 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
682 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth); 614 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
683 615
684 616
685 /* 617 /*
686 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 618 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
687 */ 619 */
688 max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) * 620 max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
689 PL080_CONTROL_TRANSFER_SIZE_MASK; 621 PL080_CONTROL_TRANSFER_SIZE_MASK;
690 dev_vdbg(&pl08x->adev->dev, 622 dev_vdbg(&pl08x->adev->dev,
691 "%s max bytes per lli = %d\n", 623 "%s max bytes per lli = %zu\n",
692 __func__, max_bytes_per_lli); 624 __func__, max_bytes_per_lli);
693 625
694 /* We need to count this down to zero */ 626 /* We need to count this down to zero */
695 remainder = txd->len; 627 bd.remainder = txd->len;
696 dev_vdbg(&pl08x->adev->dev, 628 dev_vdbg(&pl08x->adev->dev,
697 "%s remainder = %d\n", 629 "%s remainder = %zu\n",
698 __func__, remainder); 630 __func__, bd.remainder);
699 631
700 /* 632 /*
701 * Choose bus to align to 633 * Choose bus to align to
702 * - prefers destination bus if both available 634 * - prefers destination bus if both available
703 * - if fixed address on one bus chooses other 635 * - if fixed address on one bus chooses other
704 * - modifies cctl to choose an apropriate master
705 */
706 pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
707 &mbus, &sbus, cctl);
708
709
710 /*
711 * The lowest bit of the LLI register
712 * is also used to indicate which master to
713 * use for reading the LLIs.
714 */ 636 */
637 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
715 638
716 if (txd->len < mbus->buswidth) { 639 if (txd->len < mbus->buswidth) {
717 /* 640 /* Less than a bus width available - send as single bytes */
718 * Less than a bus width available 641 while (bd.remainder) {
719 * - send as single bytes
720 */
721 while (remainder) {
722 dev_vdbg(&pl08x->adev->dev, 642 dev_vdbg(&pl08x->adev->dev,
723 "%s single byte LLIs for a transfer of " 643 "%s single byte LLIs for a transfer of "
724 "less than a bus width (remain %08x)\n", 644 "less than a bus width (remain 0x%08x)\n",
725 __func__, remainder); 645 __func__, bd.remainder);
726 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 646 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
727 num_llis = 647 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
728 pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
729 cctl, &remainder);
730 total_bytes++; 648 total_bytes++;
731 } 649 }
732 } else { 650 } else {
733 /* 651 /* Make one byte LLIs until master bus is aligned */
734 * Make one byte LLIs until master bus is aligned
735 * - slave will then be aligned also
736 */
737 while ((mbus->addr) % (mbus->buswidth)) { 652 while ((mbus->addr) % (mbus->buswidth)) {
738 dev_vdbg(&pl08x->adev->dev, 653 dev_vdbg(&pl08x->adev->dev,
739 "%s adjustment lli for less than bus width " 654 "%s adjustment lli for less than bus width "
740 "(remain %08x)\n", 655 "(remain 0x%08x)\n",
741 __func__, remainder); 656 __func__, bd.remainder);
742 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 657 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
743 num_llis = pl08x_fill_lli_for_desc 658 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
744 (pl08x, txd, num_llis, 1, cctl, &remainder);
745 total_bytes++; 659 total_bytes++;
746 } 660 }
747 661
748 /* 662 /*
749 * Master now aligned 663 * Master now aligned
750 * - if slave is not then we must set its width down 664 * - if slave is not then we must set its width down
751 */ 665 */
752 if (sbus->addr % sbus->buswidth) { 666 if (sbus->addr % sbus->buswidth) {
@@ -761,63 +675,51 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
761 * Make largest possible LLIs until less than one bus 675 * Make largest possible LLIs until less than one bus
762 * width left 676 * width left
763 */ 677 */
764 while (remainder > (mbus->buswidth - 1)) { 678 while (bd.remainder > (mbus->buswidth - 1)) {
765 int lli_len, target_len; 679 size_t lli_len, target_len, tsize, odd_bytes;
766 int tsize;
767 int odd_bytes;
768 680
769 /* 681 /*
770 * If enough left try to send max possible, 682 * If enough left try to send max possible,
771 * otherwise try to send the remainder 683 * otherwise try to send the remainder
772 */ 684 */
773 target_len = remainder; 685 target_len = min(bd.remainder, max_bytes_per_lli);
774 if (remainder > max_bytes_per_lli)
775 target_len = max_bytes_per_lli;
776 686
777 /* 687 /*
778 * Set bus lengths for incrementing busses 688 * Set bus lengths for incrementing buses to the
779 * to number of bytes which fill to next memory 689 * number of bytes which fill to next memory boundary,
780 * boundary 690 * limiting on the target length calculated above.
781 */ 691 */
782 if (cctl & PL080_CONTROL_SRC_INCR) 692 if (cctl & PL080_CONTROL_SRC_INCR)
783 txd->srcbus.fill_bytes = 693 bd.srcbus.fill_bytes =
784 pl08x_pre_boundary( 694 pl08x_pre_boundary(bd.srcbus.addr,
785 txd->srcbus.addr, 695 target_len);
786 remainder);
787 else 696 else
788 txd->srcbus.fill_bytes = 697 bd.srcbus.fill_bytes = target_len;
789 max_bytes_per_lli;
790 698
791 if (cctl & PL080_CONTROL_DST_INCR) 699 if (cctl & PL080_CONTROL_DST_INCR)
792 txd->dstbus.fill_bytes = 700 bd.dstbus.fill_bytes =
793 pl08x_pre_boundary( 701 pl08x_pre_boundary(bd.dstbus.addr,
794 txd->dstbus.addr, 702 target_len);
795 remainder);
796 else 703 else
797 txd->dstbus.fill_bytes = 704 bd.dstbus.fill_bytes = target_len;
798 max_bytes_per_lli;
799 705
800 /* 706 /* Find the nearest */
801 * Find the nearest 707 lli_len = min(bd.srcbus.fill_bytes,
802 */ 708 bd.dstbus.fill_bytes);
803 lli_len = min(txd->srcbus.fill_bytes,
804 txd->dstbus.fill_bytes);
805 709
806 BUG_ON(lli_len > remainder); 710 BUG_ON(lli_len > bd.remainder);
807 711
808 if (lli_len <= 0) { 712 if (lli_len <= 0) {
809 dev_err(&pl08x->adev->dev, 713 dev_err(&pl08x->adev->dev,
810 "%s lli_len is %d, <= 0\n", 714 "%s lli_len is %zu, <= 0\n",
811 __func__, lli_len); 715 __func__, lli_len);
812 return 0; 716 return 0;
813 } 717 }
814 718
815 if (lli_len == target_len) { 719 if (lli_len == target_len) {
816 /* 720 /*
817 * Can send what we wanted 721 * Can send what we wanted.
818 */ 722 * Maintain alignment
819 /*
820 * Maintain alignment
821 */ 723 */
822 lli_len = (lli_len/mbus->buswidth) * 724 lli_len = (lli_len/mbus->buswidth) *
823 mbus->buswidth; 725 mbus->buswidth;
@@ -825,17 +727,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
825 } else { 727 } else {
826 /* 728 /*
827 * So now we know how many bytes to transfer 729 * So now we know how many bytes to transfer
828 * to get to the nearest boundary 730 * to get to the nearest boundary. The next
829 * The next lli will past the boundary 731 * LLI will past the boundary. However, we
830 * - however we may be working to a boundary 732 * may be working to a boundary on the slave
831 * on the slave bus 733 * bus. We need to ensure the master stays
832 * We need to ensure the master stays aligned 734 * aligned, and that we are working in
735 * multiples of the bus widths.
833 */ 736 */
834 odd_bytes = lli_len % mbus->buswidth; 737 odd_bytes = lli_len % mbus->buswidth;
835 /*
836 * - and that we are working in multiples
837 * of the bus widths
838 */
839 lli_len -= odd_bytes; 738 lli_len -= odd_bytes;
840 739
841 } 740 }
@@ -855,41 +754,38 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
855 754
856 if (target_len != lli_len) { 755 if (target_len != lli_len) {
857 dev_vdbg(&pl08x->adev->dev, 756 dev_vdbg(&pl08x->adev->dev,
858 "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n", 757 "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
859 __func__, target_len, lli_len, txd->len); 758 __func__, target_len, lli_len, txd->len);
860 } 759 }
861 760
862 cctl = pl08x_cctl_bits(cctl, 761 cctl = pl08x_cctl_bits(cctl,
863 txd->srcbus.buswidth, 762 bd.srcbus.buswidth,
864 txd->dstbus.buswidth, 763 bd.dstbus.buswidth,
865 tsize); 764 tsize);
866 765
867 dev_vdbg(&pl08x->adev->dev, 766 dev_vdbg(&pl08x->adev->dev,
868 "%s fill lli with single lli chunk of size %08x (remainder %08x)\n", 767 "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
869 __func__, lli_len, remainder); 768 __func__, lli_len, bd.remainder);
870 num_llis = pl08x_fill_lli_for_desc(pl08x, txd, 769 pl08x_fill_lli_for_desc(&bd, num_llis++,
871 num_llis, lli_len, cctl, 770 lli_len, cctl);
872 &remainder);
873 total_bytes += lli_len; 771 total_bytes += lli_len;
874 } 772 }
875 773
876 774
877 if (odd_bytes) { 775 if (odd_bytes) {
878 /* 776 /*
879 * Creep past the boundary, 777 * Creep past the boundary, maintaining
880 * maintaining master alignment 778 * master alignment
881 */ 779 */
882 int j; 780 int j;
883 for (j = 0; (j < mbus->buswidth) 781 for (j = 0; (j < mbus->buswidth)
884 && (remainder); j++) { 782 && (bd.remainder); j++) {
885 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 783 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
886 dev_vdbg(&pl08x->adev->dev, 784 dev_vdbg(&pl08x->adev->dev,
887 "%s align with boundardy, single byte (remain %08x)\n", 785 "%s align with boundary, single byte (remain 0x%08zx)\n",
888 __func__, remainder); 786 __func__, bd.remainder);
889 num_llis = 787 pl08x_fill_lli_for_desc(&bd,
890 pl08x_fill_lli_for_desc(pl08x, 788 num_llis++, 1, cctl);
891 txd, num_llis, 1,
892 cctl, &remainder);
893 total_bytes++; 789 total_bytes++;
894 } 790 }
895 } 791 }
@@ -898,25 +794,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
898 /* 794 /*
899 * Send any odd bytes 795 * Send any odd bytes
900 */ 796 */
901 if (remainder < 0) { 797 while (bd.remainder) {
902 dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
903 __func__, remainder);
904 return 0;
905 }
906
907 while (remainder) {
908 cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 798 cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
909 dev_vdbg(&pl08x->adev->dev, 799 dev_vdbg(&pl08x->adev->dev,
910 "%s align with boundardy, single odd byte (remain %d)\n", 800 "%s align with boundary, single odd byte (remain %zu)\n",
911 __func__, remainder); 801 __func__, bd.remainder);
912 num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 802 pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
913 1, cctl, &remainder);
914 total_bytes++; 803 total_bytes++;
915 } 804 }
916 } 805 }
917 if (total_bytes != txd->len) { 806 if (total_bytes != txd->len) {
918 dev_err(&pl08x->adev->dev, 807 dev_err(&pl08x->adev->dev,
919 "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n", 808 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
920 __func__, total_bytes, txd->len); 809 __func__, total_bytes, txd->len);
921 return 0; 810 return 0;
922 } 811 }
@@ -927,41 +816,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
927 __func__, (u32) MAX_NUM_TSFR_LLIS); 816 __func__, (u32) MAX_NUM_TSFR_LLIS);
928 return 0; 817 return 0;
929 } 818 }
930 /*
931 * Decide whether this is a loop or a terminated transfer
932 */
933 llis_va = txd->llis_va;
934 llis_bus = (struct lli *) txd->llis_bus;
935 819
936 if (cd->circular_buffer) { 820 llis_va = txd->llis_va;
937 /* 821 /* The final LLI terminates the LLI. */
938 * Loop the circular buffer so that the next element 822 llis_va[num_llis - 1].lli = 0;
939 * points back to the beginning of the LLI. 823 /* The final LLI element shall also fire an interrupt. */
940 */ 824 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
941 llis_va[num_llis - 1].next =
942 (dma_addr_t)((unsigned int)&(llis_bus[0]));
943 } else {
944 /*
945 * On non-circular buffers, the final LLI terminates
946 * the LLI.
947 */
948 llis_va[num_llis - 1].next = 0;
949 /*
950 * The final LLI element shall also fire an interrupt
951 */
952 llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
953 }
954
955 /* Now store the channel register values */
956 txd->csrc = llis_va[0].src;
957 txd->cdst = llis_va[0].dst;
958 if (num_llis > 1)
959 txd->clli = llis_va[0].next;
960 else
961 txd->clli = 0;
962
963 txd->cctl = llis_va[0].cctl;
964 /* ccfg will be set at physical channel allocation time */
965 825
966#ifdef VERBOSE_DEBUG 826#ifdef VERBOSE_DEBUG
967 { 827 {
@@ -969,13 +829,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
969 829
970 for (i = 0; i < num_llis; i++) { 830 for (i = 0; i < num_llis; i++) {
971 dev_vdbg(&pl08x->adev->dev, 831 dev_vdbg(&pl08x->adev->dev,
972 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n", 832 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
973 i, 833 i,
974 &llis_va[i], 834 &llis_va[i],
975 llis_va[i].src, 835 llis_va[i].src,
976 llis_va[i].dst, 836 llis_va[i].dst,
977 llis_va[i].cctl, 837 llis_va[i].cctl,
978 llis_va[i].next 838 llis_va[i].lli
979 ); 839 );
980 } 840 }
981 } 841 }
@@ -988,14 +848,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
988static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 848static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
989 struct pl08x_txd *txd) 849 struct pl08x_txd *txd)
990{ 850{
991 if (!txd)
992 dev_err(&pl08x->adev->dev,
993 "%s no descriptor to free\n",
994 __func__);
995
996 /* Free the LLI */ 851 /* Free the LLI */
997 dma_pool_free(pl08x->pool, txd->llis_va, 852 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
998 txd->llis_bus);
999 853
1000 pl08x->pool_ctr--; 854 pl08x->pool_ctr--;
1001 855
@@ -1008,13 +862,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
1008 struct pl08x_txd *txdi = NULL; 862 struct pl08x_txd *txdi = NULL;
1009 struct pl08x_txd *next; 863 struct pl08x_txd *next;
1010 864
1011 if (!list_empty(&plchan->desc_list)) { 865 if (!list_empty(&plchan->pend_list)) {
1012 list_for_each_entry_safe(txdi, 866 list_for_each_entry_safe(txdi,
1013 next, &plchan->desc_list, node) { 867 next, &plchan->pend_list, node) {
1014 list_del(&txdi->node); 868 list_del(&txdi->node);
1015 pl08x_free_txd(pl08x, txdi); 869 pl08x_free_txd(pl08x, txdi);
1016 } 870 }
1017
1018 } 871 }
1019} 872}
1020 873
@@ -1069,6 +922,12 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
1069 return -EBUSY; 922 return -EBUSY;
1070 } 923 }
1071 ch->signal = ret; 924 ch->signal = ret;
925
926 /* Assign the flow control signal to this channel */
927 if (txd->direction == DMA_TO_DEVICE)
928 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
929 else if (txd->direction == DMA_FROM_DEVICE)
930 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1072 } 931 }
1073 932
1074 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", 933 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +935,54 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
1076 ch->signal, 935 ch->signal,
1077 plchan->name); 936 plchan->name);
1078 937
938 plchan->phychan_hold++;
1079 plchan->phychan = ch; 939 plchan->phychan = ch;
1080 940
1081 return 0; 941 return 0;
1082} 942}
1083 943
944static void release_phy_channel(struct pl08x_dma_chan *plchan)
945{
946 struct pl08x_driver_data *pl08x = plchan->host;
947
948 if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
949 pl08x->pd->put_signal(plchan);
950 plchan->phychan->signal = -1;
951 }
952 pl08x_put_phy_channel(pl08x, plchan->phychan);
953 plchan->phychan = NULL;
954}
955
1084static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) 956static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
1085{ 957{
1086 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); 958 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
959 struct pl08x_txd *txd = to_pl08x_txd(tx);
960 unsigned long flags;
1087 961
1088 atomic_inc(&plchan->last_issued); 962 spin_lock_irqsave(&plchan->lock, flags);
1089 tx->cookie = atomic_read(&plchan->last_issued); 963
1090 /* This unlock follows the lock in the prep() function */ 964 plchan->chan.cookie += 1;
1091 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); 965 if (plchan->chan.cookie < 0)
966 plchan->chan.cookie = 1;
967 tx->cookie = plchan->chan.cookie;
968
969 /* Put this onto the pending list */
970 list_add_tail(&txd->node, &plchan->pend_list);
971
972 /*
973 * If there was no physical channel available for this memcpy,
974 * stack the request up and indicate that the channel is waiting
975 * for a free physical channel.
976 */
977 if (!plchan->slave && !plchan->phychan) {
978 /* Do this memcpy whenever there is a channel ready */
979 plchan->state = PL08X_CHAN_WAITING;
980 plchan->waiting = txd;
981 } else {
982 plchan->phychan_hold--;
983 }
984
985 spin_unlock_irqrestore(&plchan->lock, flags);
1092 986
1093 return tx->cookie; 987 return tx->cookie;
1094} 988}
@@ -1102,10 +996,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1102} 996}
1103 997
1104/* 998/*
1105 * Code accessing dma_async_is_complete() in a tight loop 999 * Code accessing dma_async_is_complete() in a tight loop may give problems.
1106 * may give problems - could schedule where indicated. 1000 * If slaves are relying on interrupts to signal completion this function
1107 * If slaves are relying on interrupts to signal completion this 1001 * must not be called with interrupts disabled.
1108 * function must not be called with interrupts disabled
1109 */ 1002 */
1110static enum dma_status 1003static enum dma_status
1111pl08x_dma_tx_status(struct dma_chan *chan, 1004pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1118,7 +1011,7 @@ pl08x_dma_tx_status(struct dma_chan *chan,
1118 enum dma_status ret; 1011 enum dma_status ret;
1119 u32 bytesleft = 0; 1012 u32 bytesleft = 0;
1120 1013
1121 last_used = atomic_read(&plchan->last_issued); 1014 last_used = plchan->chan.cookie;
1122 last_complete = plchan->lc; 1015 last_complete = plchan->lc;
1123 1016
1124 ret = dma_async_is_complete(cookie, last_complete, last_used); 1017 ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1128,13 +1021,9 @@ pl08x_dma_tx_status(struct dma_chan *chan,
1128 } 1021 }
1129 1022
1130 /* 1023 /*
1131 * schedule(); could be inserted here
1132 */
1133
1134 /*
1135 * This cookie not complete yet 1024 * This cookie not complete yet
1136 */ 1025 */
1137 last_used = atomic_read(&plchan->last_issued); 1026 last_used = plchan->chan.cookie;
1138 last_complete = plchan->lc; 1027 last_complete = plchan->lc;
1139 1028
1140 /* Get number of bytes left in the active transactions and queue */ 1029 /* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1088,35 @@ static const struct burst_table burst_sizes[] = {
1199 }, 1088 },
1200}; 1089};
1201 1090
1202static void dma_set_runtime_config(struct dma_chan *chan, 1091static int dma_set_runtime_config(struct dma_chan *chan,
1203 struct dma_slave_config *config) 1092 struct dma_slave_config *config)
1204{ 1093{
1205 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1094 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1206 struct pl08x_driver_data *pl08x = plchan->host; 1095 struct pl08x_driver_data *pl08x = plchan->host;
1207 struct pl08x_channel_data *cd = plchan->cd; 1096 struct pl08x_channel_data *cd = plchan->cd;
1208 enum dma_slave_buswidth addr_width; 1097 enum dma_slave_buswidth addr_width;
1098 dma_addr_t addr;
1209 u32 maxburst; 1099 u32 maxburst;
1210 u32 cctl = 0; 1100 u32 cctl = 0;
1211 /* Mask out all except src and dst channel */ 1101 int i;
1212 u32 ccfg = cd->ccfg & 0x000003DEU; 1102
1213 int i = 0; 1103 if (!plchan->slave)
1104 return -EINVAL;
1214 1105
1215 /* Transfer direction */ 1106 /* Transfer direction */
1216 plchan->runtime_direction = config->direction; 1107 plchan->runtime_direction = config->direction;
1217 if (config->direction == DMA_TO_DEVICE) { 1108 if (config->direction == DMA_TO_DEVICE) {
1218 plchan->runtime_addr = config->dst_addr; 1109 addr = config->dst_addr;
1219 cctl |= PL080_CONTROL_SRC_INCR;
1220 ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1221 addr_width = config->dst_addr_width; 1110 addr_width = config->dst_addr_width;
1222 maxburst = config->dst_maxburst; 1111 maxburst = config->dst_maxburst;
1223 } else if (config->direction == DMA_FROM_DEVICE) { 1112 } else if (config->direction == DMA_FROM_DEVICE) {
1224 plchan->runtime_addr = config->src_addr; 1113 addr = config->src_addr;
1225 cctl |= PL080_CONTROL_DST_INCR;
1226 ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1227 addr_width = config->src_addr_width; 1114 addr_width = config->src_addr_width;
1228 maxburst = config->src_maxburst; 1115 maxburst = config->src_maxburst;
1229 } else { 1116 } else {
1230 dev_err(&pl08x->adev->dev, 1117 dev_err(&pl08x->adev->dev,
1231 "bad runtime_config: alien transfer direction\n"); 1118 "bad runtime_config: alien transfer direction\n");
1232 return; 1119 return -EINVAL;
1233 } 1120 }
1234 1121
1235 switch (addr_width) { 1122 switch (addr_width) {
@@ -1248,42 +1135,40 @@ static void dma_set_runtime_config(struct dma_chan *chan,
1248 default: 1135 default:
1249 dev_err(&pl08x->adev->dev, 1136 dev_err(&pl08x->adev->dev,
1250 "bad runtime_config: alien address width\n"); 1137 "bad runtime_config: alien address width\n");
1251 return; 1138 return -EINVAL;
1252 } 1139 }
1253 1140
1254 /* 1141 /*
1255 * Now decide on a maxburst: 1142 * Now decide on a maxburst:
1256 * If this channel will only request single transfers, set 1143 * If this channel will only request single transfers, set this
1257 * this down to ONE element. 1144 * down to ONE element. Also select one element if no maxburst
1145 * is specified.
1258 */ 1146 */
1259 if (plchan->cd->single) { 1147 if (plchan->cd->single || maxburst == 0) {
1260 cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | 1148 cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
1261 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); 1149 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
1262 } else { 1150 } else {
1263 while (i < ARRAY_SIZE(burst_sizes)) { 1151 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
1264 if (burst_sizes[i].burstwords <= maxburst) 1152 if (burst_sizes[i].burstwords <= maxburst)
1265 break; 1153 break;
1266 i++;
1267 }
1268 cctl |= burst_sizes[i].reg; 1154 cctl |= burst_sizes[i].reg;
1269 } 1155 }
1270 1156
1271 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1157 plchan->runtime_addr = addr;
1272 cctl &= ~PL080_CONTROL_PROT_MASK;
1273 cctl |= PL080_CONTROL_PROT_SYS;
1274 1158
1275 /* Modify the default channel data to fit PrimeCell request */ 1159 /* Modify the default channel data to fit PrimeCell request */
1276 cd->cctl = cctl; 1160 cd->cctl = cctl;
1277 cd->ccfg = ccfg;
1278 1161
1279 dev_dbg(&pl08x->adev->dev, 1162 dev_dbg(&pl08x->adev->dev,
1280 "configured channel %s (%s) for %s, data width %d, " 1163 "configured channel %s (%s) for %s, data width %d, "
1281 "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n", 1164 "maxburst %d words, LE, CCTL=0x%08x\n",
1282 dma_chan_name(chan), plchan->name, 1165 dma_chan_name(chan), plchan->name,
1283 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 1166 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
1284 addr_width, 1167 addr_width,
1285 maxburst, 1168 maxburst,
1286 cctl, ccfg); 1169 cctl);
1170
1171 return 0;
1287} 1172}
1288 1173
1289/* 1174/*
@@ -1293,35 +1178,26 @@ static void dma_set_runtime_config(struct dma_chan *chan,
1293static void pl08x_issue_pending(struct dma_chan *chan) 1178static void pl08x_issue_pending(struct dma_chan *chan)
1294{ 1179{
1295 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1180 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1296 struct pl08x_driver_data *pl08x = plchan->host;
1297 unsigned long flags; 1181 unsigned long flags;
1298 1182
1299 spin_lock_irqsave(&plchan->lock, flags); 1183 spin_lock_irqsave(&plchan->lock, flags);
1300 /* Something is already active */ 1184 /* Something is already active, or we're waiting for a channel... */
1301 if (plchan->at) { 1185 if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
1302 spin_unlock_irqrestore(&plchan->lock, flags); 1186 spin_unlock_irqrestore(&plchan->lock, flags);
1303 return;
1304 }
1305
1306 /* Didn't get a physical channel so waiting for it ... */
1307 if (plchan->state == PL08X_CHAN_WAITING)
1308 return; 1187 return;
1188 }
1309 1189
1310 /* Take the first element in the queue and execute it */ 1190 /* Take the first element in the queue and execute it */
1311 if (!list_empty(&plchan->desc_list)) { 1191 if (!list_empty(&plchan->pend_list)) {
1312 struct pl08x_txd *next; 1192 struct pl08x_txd *next;
1313 1193
1314 next = list_first_entry(&plchan->desc_list, 1194 next = list_first_entry(&plchan->pend_list,
1315 struct pl08x_txd, 1195 struct pl08x_txd,
1316 node); 1196 node);
1317 list_del(&next->node); 1197 list_del(&next->node);
1318 plchan->at = next;
1319 plchan->state = PL08X_CHAN_RUNNING; 1198 plchan->state = PL08X_CHAN_RUNNING;
1320 1199
1321 /* Configure the physical channel for the active txd */ 1200 pl08x_start_txd(plchan, next);
1322 pl08x_config_phychan_for_txd(plchan);
1323 pl08x_set_cregs(pl08x, plchan->phychan);
1324 pl08x_enable_phy_chan(pl08x, plchan->phychan);
1325 } 1201 }
1326 1202
1327 spin_unlock_irqrestore(&plchan->lock, flags); 1203 spin_unlock_irqrestore(&plchan->lock, flags);
@@ -1330,30 +1206,17 @@ static void pl08x_issue_pending(struct dma_chan *chan)
1330static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, 1206static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1331 struct pl08x_txd *txd) 1207 struct pl08x_txd *txd)
1332{ 1208{
1333 int num_llis;
1334 struct pl08x_driver_data *pl08x = plchan->host; 1209 struct pl08x_driver_data *pl08x = plchan->host;
1335 int ret; 1210 unsigned long flags;
1211 int num_llis, ret;
1336 1212
1337 num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1213 num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
1338 1214 if (!num_llis) {
1339 if (!num_llis) 1215 kfree(txd);
1340 return -EINVAL; 1216 return -EINVAL;
1217 }
1341 1218
1342 spin_lock_irqsave(&plchan->lock, plchan->lockflags); 1219 spin_lock_irqsave(&plchan->lock, flags);
1343
1344 /*
1345 * If this device is not using a circular buffer then
1346 * queue this new descriptor for transfer.
1347 * The descriptor for a circular buffer continues
1348 * to be used until the channel is freed.
1349 */
1350 if (txd->cd->circular_buffer)
1351 dev_err(&pl08x->adev->dev,
1352 "%s attempting to queue a circular buffer\n",
1353 __func__);
1354 else
1355 list_add_tail(&txd->node,
1356 &plchan->desc_list);
1357 1220
1358 /* 1221 /*
1359 * See if we already have a physical channel allocated, 1222 * See if we already have a physical channel allocated,
@@ -1362,45 +1225,74 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1362 ret = prep_phy_channel(plchan, txd); 1225 ret = prep_phy_channel(plchan, txd);
1363 if (ret) { 1226 if (ret) {
1364 /* 1227 /*
1365 * No physical channel available, we will 1228 * No physical channel was available.
1366 * stack up the memcpy channels until there is a channel 1229 *
1367 * available to handle it whereas slave transfers may 1230 * memcpy transfers can be sorted out at submission time.
1368 * have been denied due to platform channel muxing restrictions 1231 *
1369 * and since there is no guarantee that this will ever be 1232 * Slave transfers may have been denied due to platform
1370 * resolved, and since the signal must be aquired AFTER 1233 * channel muxing restrictions. Since there is no guarantee
1371 * aquiring the physical channel, we will let them be NACK:ed 1234 * that this will ever be resolved, and the signal must be
1372 * with -EBUSY here. The drivers can alway retry the prep() 1235 * acquired AFTER acquiring the physical channel, we will let
1373 * call if they are eager on doing this using DMA. 1236 * them be NACK:ed with -EBUSY here. The drivers can retry
1237 * the prep() call if they are eager on doing this using DMA.
1374 */ 1238 */
1375 if (plchan->slave) { 1239 if (plchan->slave) {
1376 pl08x_free_txd_list(pl08x, plchan); 1240 pl08x_free_txd_list(pl08x, plchan);
1377 spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); 1241 pl08x_free_txd(pl08x, txd);
1242 spin_unlock_irqrestore(&plchan->lock, flags);
1378 return -EBUSY; 1243 return -EBUSY;
1379 } 1244 }
1380 /* Do this memcpy whenever there is a channel ready */
1381 plchan->state = PL08X_CHAN_WAITING;
1382 plchan->waiting = txd;
1383 } else 1245 } else
1384 /* 1246 /*
1385 * Else we're all set, paused and ready to roll, 1247 * Else we're all set, paused and ready to roll, status
1386 * status will switch to PL08X_CHAN_RUNNING when 1248 * will switch to PL08X_CHAN_RUNNING when we call
1387 * we call issue_pending(). If there is something 1249 * issue_pending(). If there is something running on the
1388 * running on the channel already we don't change 1250 * channel already we don't change its state.
1389 * its state.
1390 */ 1251 */
1391 if (plchan->state == PL08X_CHAN_IDLE) 1252 if (plchan->state == PL08X_CHAN_IDLE)
1392 plchan->state = PL08X_CHAN_PAUSED; 1253 plchan->state = PL08X_CHAN_PAUSED;
1393 1254
1394 /* 1255 spin_unlock_irqrestore(&plchan->lock, flags);
1395 * Notice that we leave plchan->lock locked on purpose:
1396 * it will be unlocked in the subsequent tx_submit()
1397 * call. This is a consequence of the current API.
1398 */
1399 1256
1400 return 0; 1257 return 0;
1401} 1258}
1402 1259
1403/* 1260/*
1261 * Given the source and destination available bus masks, select which
1262 * will be routed to each port. We try to have source and destination
1263 * on separate ports, but always respect the allowable settings.
1264 */
1265static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
1266{
1267 u32 cctl = 0;
1268
1269 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1270 cctl |= PL080_CONTROL_DST_AHB2;
1271 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1272 cctl |= PL080_CONTROL_SRC_AHB2;
1273
1274 return cctl;
1275}
1276
1277static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1278 unsigned long flags)
1279{
1280 struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
1281
1282 if (txd) {
1283 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
1284 txd->tx.flags = flags;
1285 txd->tx.tx_submit = pl08x_tx_submit;
1286 INIT_LIST_HEAD(&txd->node);
1287
1288 /* Always enable error and terminal interrupts */
1289 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
1290 PL080_CONFIG_TC_IRQ_MASK;
1291 }
1292 return txd;
1293}
1294
1295/*
1404 * Initialize a descriptor to be used by memcpy submit 1296 * Initialize a descriptor to be used by memcpy submit
1405 */ 1297 */
1406static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1298static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
@@ -1412,40 +1304,38 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1412 struct pl08x_txd *txd; 1304 struct pl08x_txd *txd;
1413 int ret; 1305 int ret;
1414 1306
1415 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1307 txd = pl08x_get_txd(plchan, flags);
1416 if (!txd) { 1308 if (!txd) {
1417 dev_err(&pl08x->adev->dev, 1309 dev_err(&pl08x->adev->dev,
1418 "%s no memory for descriptor\n", __func__); 1310 "%s no memory for descriptor\n", __func__);
1419 return NULL; 1311 return NULL;
1420 } 1312 }
1421 1313
1422 dma_async_tx_descriptor_init(&txd->tx, chan);
1423 txd->direction = DMA_NONE; 1314 txd->direction = DMA_NONE;
1424 txd->srcbus.addr = src; 1315 txd->src_addr = src;
1425 txd->dstbus.addr = dest; 1316 txd->dst_addr = dest;
1317 txd->len = len;
1426 1318
1427 /* Set platform data for m2m */ 1319 /* Set platform data for m2m */
1428 txd->cd = &pl08x->pd->memcpy_channel; 1320 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1321 txd->cctl = pl08x->pd->memcpy_channel.cctl &
1322 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
1323
1429 /* Both to be incremented or the code will break */ 1324 /* Both to be incremented or the code will break */
1430 txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1325 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1431 txd->tx.tx_submit = pl08x_tx_submit; 1326
1432 txd->tx.callback = NULL; 1327 if (pl08x->vd->dualmaster)
1433 txd->tx.callback_param = NULL; 1328 txd->cctl |= pl08x_select_bus(pl08x,
1434 txd->len = len; 1329 pl08x->mem_buses, pl08x->mem_buses);
1435 1330
1436 INIT_LIST_HEAD(&txd->node);
1437 ret = pl08x_prep_channel_resources(plchan, txd); 1331 ret = pl08x_prep_channel_resources(plchan, txd);
1438 if (ret) 1332 if (ret)
1439 return NULL; 1333 return NULL;
1440 /*
1441 * NB: the channel lock is held at this point so tx_submit()
1442 * must be called in direct succession.
1443 */
1444 1334
1445 return &txd->tx; 1335 return &txd->tx;
1446} 1336}
1447 1337
1448struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1338static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1449 struct dma_chan *chan, struct scatterlist *sgl, 1339 struct dma_chan *chan, struct scatterlist *sgl,
1450 unsigned int sg_len, enum dma_data_direction direction, 1340 unsigned int sg_len, enum dma_data_direction direction,
1451 unsigned long flags) 1341 unsigned long flags)
@@ -1453,6 +1343,7 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1453 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1343 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1454 struct pl08x_driver_data *pl08x = plchan->host; 1344 struct pl08x_driver_data *pl08x = plchan->host;
1455 struct pl08x_txd *txd; 1345 struct pl08x_txd *txd;
1346 u8 src_buses, dst_buses;
1456 int ret; 1347 int ret;
1457 1348
1458 /* 1349 /*
@@ -1467,14 +1358,12 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1467 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1358 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1468 __func__, sgl->length, plchan->name); 1359 __func__, sgl->length, plchan->name);
1469 1360
1470 txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1361 txd = pl08x_get_txd(plchan, flags);
1471 if (!txd) { 1362 if (!txd) {
1472 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1363 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1473 return NULL; 1364 return NULL;
1474 } 1365 }
1475 1366
1476 dma_async_tx_descriptor_init(&txd->tx, chan);
1477
1478 if (direction != plchan->runtime_direction) 1367 if (direction != plchan->runtime_direction)
1479 dev_err(&pl08x->adev->dev, "%s DMA setup does not match " 1368 dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
1480 "the direction configured for the PrimeCell\n", 1369 "the direction configured for the PrimeCell\n",
@@ -1486,37 +1375,47 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1486 * channel target address dynamically at runtime. 1375 * channel target address dynamically at runtime.
1487 */ 1376 */
1488 txd->direction = direction; 1377 txd->direction = direction;
1378 txd->len = sgl->length;
1379
1380 txd->cctl = plchan->cd->cctl &
1381 ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1382 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1383 PL080_CONTROL_PROT_MASK);
1384
1385 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1386 txd->cctl |= PL080_CONTROL_PROT_SYS;
1387
1489 if (direction == DMA_TO_DEVICE) { 1388 if (direction == DMA_TO_DEVICE) {
1490 txd->srcbus.addr = sgl->dma_address; 1389 txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1390 txd->cctl |= PL080_CONTROL_SRC_INCR;
1391 txd->src_addr = sgl->dma_address;
1491 if (plchan->runtime_addr) 1392 if (plchan->runtime_addr)
1492 txd->dstbus.addr = plchan->runtime_addr; 1393 txd->dst_addr = plchan->runtime_addr;
1493 else 1394 else
1494 txd->dstbus.addr = plchan->cd->addr; 1395 txd->dst_addr = plchan->cd->addr;
1396 src_buses = pl08x->mem_buses;
1397 dst_buses = plchan->cd->periph_buses;
1495 } else if (direction == DMA_FROM_DEVICE) { 1398 } else if (direction == DMA_FROM_DEVICE) {
1399 txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1400 txd->cctl |= PL080_CONTROL_DST_INCR;
1496 if (plchan->runtime_addr) 1401 if (plchan->runtime_addr)
1497 txd->srcbus.addr = plchan->runtime_addr; 1402 txd->src_addr = plchan->runtime_addr;
1498 else 1403 else
1499 txd->srcbus.addr = plchan->cd->addr; 1404 txd->src_addr = plchan->cd->addr;
1500 txd->dstbus.addr = sgl->dma_address; 1405 txd->dst_addr = sgl->dma_address;
1406 src_buses = plchan->cd->periph_buses;
1407 dst_buses = pl08x->mem_buses;
1501 } else { 1408 } else {
1502 dev_err(&pl08x->adev->dev, 1409 dev_err(&pl08x->adev->dev,
1503 "%s direction unsupported\n", __func__); 1410 "%s direction unsupported\n", __func__);
1504 return NULL; 1411 return NULL;
1505 } 1412 }
1506 txd->cd = plchan->cd; 1413
1507 txd->tx.tx_submit = pl08x_tx_submit; 1414 txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
1508 txd->tx.callback = NULL;
1509 txd->tx.callback_param = NULL;
1510 txd->len = sgl->length;
1511 INIT_LIST_HEAD(&txd->node);
1512 1415
1513 ret = pl08x_prep_channel_resources(plchan, txd); 1416 ret = pl08x_prep_channel_resources(plchan, txd);
1514 if (ret) 1417 if (ret)
1515 return NULL; 1418 return NULL;
1516 /*
1517 * NB: the channel lock is held at this point so tx_submit()
1518 * must be called in direct succession.
1519 */
1520 1419
1521 return &txd->tx; 1420 return &txd->tx;
1522} 1421}
@@ -1531,10 +1430,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1531 1430
1532 /* Controls applicable to inactive channels */ 1431 /* Controls applicable to inactive channels */
1533 if (cmd == DMA_SLAVE_CONFIG) { 1432 if (cmd == DMA_SLAVE_CONFIG) {
1534 dma_set_runtime_config(chan, 1433 return dma_set_runtime_config(chan,
1535 (struct dma_slave_config *) 1434 (struct dma_slave_config *)arg);
1536 arg);
1537 return 0;
1538 } 1435 }
1539 1436
1540 /* 1437 /*
@@ -1558,16 +1455,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1558 * Mark physical channel as free and free any slave 1455 * Mark physical channel as free and free any slave
1559 * signal 1456 * signal
1560 */ 1457 */
1561 if ((plchan->phychan->signal >= 0) && 1458 release_phy_channel(plchan);
1562 pl08x->pd->put_signal) {
1563 pl08x->pd->put_signal(plchan);
1564 plchan->phychan->signal = -1;
1565 }
1566 pl08x_put_phy_channel(pl08x, plchan->phychan);
1567 plchan->phychan = NULL;
1568 } 1459 }
1569 /* Stop any pending tasklet */
1570 tasklet_disable(&plchan->tasklet);
1571 /* Dequeue jobs and free LLIs */ 1460 /* Dequeue jobs and free LLIs */
1572 if (plchan->at) { 1461 if (plchan->at) {
1573 pl08x_free_txd(pl08x, plchan->at); 1462 pl08x_free_txd(pl08x, plchan->at);
@@ -1609,10 +1498,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1609 1498
1610/* 1499/*
1611 * Just check that the device is there and active 1500 * Just check that the device is there and active
1612 * TODO: turn this bit on/off depending on the number of 1501 * TODO: turn this bit on/off depending on the number of physical channels
1613 * physical channels actually used, if it is zero... well 1502 * actually used, if it is zero... well shut it off. That will save some
1614 * shut it off. That will save some power. Cut the clock 1503 * power. Cut the clock at the same time.
1615 * at the same time.
1616 */ 1504 */
1617static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1505static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1618{ 1506{
@@ -1620,78 +1508,66 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1620 1508
1621 val = readl(pl08x->base + PL080_CONFIG); 1509 val = readl(pl08x->base + PL080_CONFIG);
1622 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); 1510 val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1623 /* We implictly clear bit 1 and that means little-endian mode */ 1511 /* We implicitly clear bit 1 and that means little-endian mode */
1624 val |= PL080_CONFIG_ENABLE; 1512 val |= PL080_CONFIG_ENABLE;
1625 writel(val, pl08x->base + PL080_CONFIG); 1513 writel(val, pl08x->base + PL080_CONFIG);
1626} 1514}
1627 1515
1516static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1517{
1518 struct device *dev = txd->tx.chan->device->dev;
1519
1520 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1521 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1522 dma_unmap_single(dev, txd->src_addr, txd->len,
1523 DMA_TO_DEVICE);
1524 else
1525 dma_unmap_page(dev, txd->src_addr, txd->len,
1526 DMA_TO_DEVICE);
1527 }
1528 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1529 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1530 dma_unmap_single(dev, txd->dst_addr, txd->len,
1531 DMA_FROM_DEVICE);
1532 else
1533 dma_unmap_page(dev, txd->dst_addr, txd->len,
1534 DMA_FROM_DEVICE);
1535 }
1536}
1537
1628static void pl08x_tasklet(unsigned long data) 1538static void pl08x_tasklet(unsigned long data)
1629{ 1539{
1630 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; 1540 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1631 struct pl08x_phy_chan *phychan = plchan->phychan;
1632 struct pl08x_driver_data *pl08x = plchan->host; 1541 struct pl08x_driver_data *pl08x = plchan->host;
1542 struct pl08x_txd *txd;
1543 unsigned long flags;
1633 1544
1634 if (!plchan) 1545 spin_lock_irqsave(&plchan->lock, flags);
1635 BUG();
1636
1637 spin_lock(&plchan->lock);
1638
1639 if (plchan->at) {
1640 dma_async_tx_callback callback =
1641 plchan->at->tx.callback;
1642 void *callback_param =
1643 plchan->at->tx.callback_param;
1644
1645 /*
1646 * Update last completed
1647 */
1648 plchan->lc =
1649 (plchan->at->tx.cookie);
1650
1651 /*
1652 * Callback to signal completion
1653 */
1654 if (callback)
1655 callback(callback_param);
1656 1546
1657 /* 1547 txd = plchan->at;
1658 * Device callbacks should NOT clear 1548 plchan->at = NULL;
1659 * the current transaction on the channel
1660 * Linus: sometimes they should?
1661 */
1662 if (!plchan->at)
1663 BUG();
1664 1549
1665 /* 1550 if (txd) {
1666 * Free the descriptor if it's not for a device 1551 /* Update last completed */
1667 * using a circular buffer 1552 plchan->lc = txd->tx.cookie;
1668 */
1669 if (!plchan->at->cd->circular_buffer) {
1670 pl08x_free_txd(pl08x, plchan->at);
1671 plchan->at = NULL;
1672 }
1673 /*
1674 * else descriptor for circular
1675 * buffers only freed when
1676 * client has disabled dma
1677 */
1678 } 1553 }
1679 /* 1554
1680 * If a new descriptor is queued, set it up 1555 /* If a new descriptor is queued, set it up plchan->at is NULL here */
1681 * plchan->at is NULL here 1556 if (!list_empty(&plchan->pend_list)) {
1682 */
1683 if (!list_empty(&plchan->desc_list)) {
1684 struct pl08x_txd *next; 1557 struct pl08x_txd *next;
1685 1558
1686 next = list_first_entry(&plchan->desc_list, 1559 next = list_first_entry(&plchan->pend_list,
1687 struct pl08x_txd, 1560 struct pl08x_txd,
1688 node); 1561 node);
1689 list_del(&next->node); 1562 list_del(&next->node);
1690 plchan->at = next; 1563
1691 /* Configure the physical channel for the next txd */ 1564 pl08x_start_txd(plchan, next);
1692 pl08x_config_phychan_for_txd(plchan); 1565 } else if (plchan->phychan_hold) {
1693 pl08x_set_cregs(pl08x, plchan->phychan); 1566 /*
1694 pl08x_enable_phy_chan(pl08x, plchan->phychan); 1567 * This channel is still in use - we have a new txd being
1568 * prepared and will soon be queued. Don't give up the
1569 * physical channel.
1570 */
1695 } else { 1571 } else {
1696 struct pl08x_dma_chan *waiting = NULL; 1572 struct pl08x_dma_chan *waiting = NULL;
1697 1573
@@ -1699,20 +1575,14 @@ static void pl08x_tasklet(unsigned long data)
1699 * No more jobs, so free up the physical channel 1575 * No more jobs, so free up the physical channel
1700 * Free any allocated signal on slave transfers too 1576 * Free any allocated signal on slave transfers too
1701 */ 1577 */
1702 if ((phychan->signal >= 0) && pl08x->pd->put_signal) { 1578 release_phy_channel(plchan);
1703 pl08x->pd->put_signal(plchan);
1704 phychan->signal = -1;
1705 }
1706 pl08x_put_phy_channel(pl08x, phychan);
1707 plchan->phychan = NULL;
1708 plchan->state = PL08X_CHAN_IDLE; 1579 plchan->state = PL08X_CHAN_IDLE;
1709 1580
1710 /* 1581 /*
1711 * And NOW before anyone else can grab that free:d 1582 * And NOW before anyone else can grab that free:d up
1712 * up physical channel, see if there is some memcpy 1583 * physical channel, see if there is some memcpy pending
1713 * pending that seriously needs to start because of 1584 * that seriously needs to start because of being stacked
1714 * being stacked up while we were choking the 1585 * up while we were choking the physical channels with data.
1715 * physical channels with data.
1716 */ 1586 */
1717 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1587 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1718 chan.device_node) { 1588 chan.device_node) {
@@ -1724,6 +1594,7 @@ static void pl08x_tasklet(unsigned long data)
1724 ret = prep_phy_channel(waiting, 1594 ret = prep_phy_channel(waiting,
1725 waiting->waiting); 1595 waiting->waiting);
1726 BUG_ON(ret); 1596 BUG_ON(ret);
1597 waiting->phychan_hold--;
1727 waiting->state = PL08X_CHAN_RUNNING; 1598 waiting->state = PL08X_CHAN_RUNNING;
1728 waiting->waiting = NULL; 1599 waiting->waiting = NULL;
1729 pl08x_issue_pending(&waiting->chan); 1600 pl08x_issue_pending(&waiting->chan);
@@ -1732,7 +1603,25 @@ static void pl08x_tasklet(unsigned long data)
1732 } 1603 }
1733 } 1604 }
1734 1605
1735 spin_unlock(&plchan->lock); 1606 spin_unlock_irqrestore(&plchan->lock, flags);
1607
1608 if (txd) {
1609 dma_async_tx_callback callback = txd->tx.callback;
1610 void *callback_param = txd->tx.callback_param;
1611
1612 /* Don't try to unmap buffers on slave channels */
1613 if (!plchan->slave)
1614 pl08x_unmap_buffers(txd);
1615
1616 /* Free the descriptor */
1617 spin_lock_irqsave(&plchan->lock, flags);
1618 pl08x_free_txd(pl08x, txd);
1619 spin_unlock_irqrestore(&plchan->lock, flags);
1620
1621 /* Callback to signal completion */
1622 if (callback)
1623 callback(callback_param);
1624 }
1736} 1625}
1737 1626
1738static irqreturn_t pl08x_irq(int irq, void *dev) 1627static irqreturn_t pl08x_irq(int irq, void *dev)
@@ -1744,9 +1633,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1744 1633
1745 val = readl(pl08x->base + PL080_ERR_STATUS); 1634 val = readl(pl08x->base + PL080_ERR_STATUS);
1746 if (val) { 1635 if (val) {
1747 /* 1636 /* An error interrupt (on one or more channels) */
1748 * An error interrupt (on one or more channels)
1749 */
1750 dev_err(&pl08x->adev->dev, 1637 dev_err(&pl08x->adev->dev,
1751 "%s error interrupt, register value 0x%08x\n", 1638 "%s error interrupt, register value 0x%08x\n",
1752 __func__, val); 1639 __func__, val);
@@ -1770,9 +1657,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1770 mask |= (1 << i); 1657 mask |= (1 << i);
1771 } 1658 }
1772 } 1659 }
1773 /* 1660 /* Clear only the terminal interrupts on channels we processed */
1774 * Clear only the terminal interrupts on channels we processed
1775 */
1776 writel(mask, pl08x->base + PL080_TC_CLEAR); 1661 writel(mask, pl08x->base + PL080_TC_CLEAR);
1777 1662
1778 return mask ? IRQ_HANDLED : IRQ_NONE; 1663 return mask ? IRQ_HANDLED : IRQ_NONE;
@@ -1791,6 +1676,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1791 int i; 1676 int i;
1792 1677
1793 INIT_LIST_HEAD(&dmadev->channels); 1678 INIT_LIST_HEAD(&dmadev->channels);
1679
1794 /* 1680 /*
1795 * Register as many many memcpy as we have physical channels, 1681 * Register as many many memcpy as we have physical channels,
1796 * we won't always be able to use all but the code will have 1682 * we won't always be able to use all but the code will have
@@ -1819,16 +1705,23 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1819 return -ENOMEM; 1705 return -ENOMEM;
1820 } 1706 }
1821 } 1707 }
1708 if (chan->cd->circular_buffer) {
1709 dev_err(&pl08x->adev->dev,
1710 "channel %s: circular buffers not supported\n",
1711 chan->name);
1712 kfree(chan);
1713 continue;
1714 }
1822 dev_info(&pl08x->adev->dev, 1715 dev_info(&pl08x->adev->dev,
1823 "initialize virtual channel \"%s\"\n", 1716 "initialize virtual channel \"%s\"\n",
1824 chan->name); 1717 chan->name);
1825 1718
1826 chan->chan.device = dmadev; 1719 chan->chan.device = dmadev;
1827 atomic_set(&chan->last_issued, 0); 1720 chan->chan.cookie = 0;
1828 chan->lc = atomic_read(&chan->last_issued); 1721 chan->lc = 0;
1829 1722
1830 spin_lock_init(&chan->lock); 1723 spin_lock_init(&chan->lock);
1831 INIT_LIST_HEAD(&chan->desc_list); 1724 INIT_LIST_HEAD(&chan->pend_list);
1832 tasklet_init(&chan->tasklet, pl08x_tasklet, 1725 tasklet_init(&chan->tasklet, pl08x_tasklet,
1833 (unsigned long) chan); 1726 (unsigned long) chan);
1834 1727
@@ -1898,7 +1791,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1898 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1791 seq_printf(s, "CHANNEL:\tSTATE:\n");
1899 seq_printf(s, "--------\t------\n"); 1792 seq_printf(s, "--------\t------\n");
1900 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { 1793 list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1901 seq_printf(s, "%s\t\t\%s\n", chan->name, 1794 seq_printf(s, "%s\t\t%s\n", chan->name,
1902 pl08x_state_str(chan->state)); 1795 pl08x_state_str(chan->state));
1903 } 1796 }
1904 1797
@@ -1906,7 +1799,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1906 seq_printf(s, "CHANNEL:\tSTATE:\n"); 1799 seq_printf(s, "CHANNEL:\tSTATE:\n");
1907 seq_printf(s, "--------\t------\n"); 1800 seq_printf(s, "--------\t------\n");
1908 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { 1801 list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1909 seq_printf(s, "%s\t\t\%s\n", chan->name, 1802 seq_printf(s, "%s\t\t%s\n", chan->name,
1910 pl08x_state_str(chan->state)); 1803 pl08x_state_str(chan->state));
1911 } 1804 }
1912 1805
@@ -1942,7 +1835,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1942static int pl08x_probe(struct amba_device *adev, struct amba_id *id) 1835static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1943{ 1836{
1944 struct pl08x_driver_data *pl08x; 1837 struct pl08x_driver_data *pl08x;
1945 struct vendor_data *vd = id->data; 1838 const struct vendor_data *vd = id->data;
1946 int ret = 0; 1839 int ret = 0;
1947 int i; 1840 int i;
1948 1841
@@ -1990,6 +1883,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1990 pl08x->adev = adev; 1883 pl08x->adev = adev;
1991 pl08x->vd = vd; 1884 pl08x->vd = vd;
1992 1885
1886 /* By default, AHB1 only. If dualmaster, from platform */
1887 pl08x->lli_buses = PL08X_AHB1;
1888 pl08x->mem_buses = PL08X_AHB1;
1889 if (pl08x->vd->dualmaster) {
1890 pl08x->lli_buses = pl08x->pd->lli_buses;
1891 pl08x->mem_buses = pl08x->pd->mem_buses;
1892 }
1893
1993 /* A DMA memory pool for LLIs, align on 1-byte boundary */ 1894 /* A DMA memory pool for LLIs, align on 1-byte boundary */
1994 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 1895 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
1995 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); 1896 PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
@@ -2009,14 +1910,12 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
2009 /* Turn on the PL08x */ 1910 /* Turn on the PL08x */
2010 pl08x_ensure_on(pl08x); 1911 pl08x_ensure_on(pl08x);
2011 1912
2012 /* 1913 /* Attach the interrupt handler */
2013 * Attach the interrupt handler
2014 */
2015 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1914 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2016 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 1915 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2017 1916
2018 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, 1917 ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
2019 vd->name, pl08x); 1918 DRIVER_NAME, pl08x);
2020 if (ret) { 1919 if (ret) {
2021 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 1920 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2022 __func__, adev->irq[0]); 1921 __func__, adev->irq[0]);
@@ -2087,8 +1986,9 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
2087 1986
2088 amba_set_drvdata(adev, pl08x); 1987 amba_set_drvdata(adev, pl08x);
2089 init_pl08x_debugfs(pl08x); 1988 init_pl08x_debugfs(pl08x);
2090 dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", 1989 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
2091 vd->name, adev->res.start); 1990 amba_part(adev), amba_rev(adev),
1991 (unsigned long long)adev->res.start, adev->irq[0]);
2092 return 0; 1992 return 0;
2093 1993
2094out_no_slave_reg: 1994out_no_slave_reg:
@@ -2115,13 +2015,11 @@ out_no_pl08x:
2115 2015
2116/* PL080 has 8 channels and the PL080 have just 2 */ 2016/* PL080 has 8 channels and the PL080 have just 2 */
2117static struct vendor_data vendor_pl080 = { 2017static struct vendor_data vendor_pl080 = {
2118 .name = "PL080",
2119 .channels = 8, 2018 .channels = 8,
2120 .dualmaster = true, 2019 .dualmaster = true,
2121}; 2020};
2122 2021
2123static struct vendor_data vendor_pl081 = { 2022static struct vendor_data vendor_pl081 = {
2124 .name = "PL081",
2125 .channels = 2, 2023 .channels = 2,
2126 .dualmaster = false, 2024 .dualmaster = false,
2127}; 2025};
@@ -2160,7 +2058,7 @@ static int __init pl08x_init(void)
2160 retval = amba_driver_register(&pl08x_amba_driver); 2058 retval = amba_driver_register(&pl08x_amba_driver);
2161 if (retval) 2059 if (retval)
2162 printk(KERN_WARNING DRIVER_NAME 2060 printk(KERN_WARNING DRIVER_NAME
2163 "failed to register as an amba device (%d)\n", 2061 "failed to register as an AMBA device (%d)\n",
2164 retval); 2062 retval);
2165 return retval; 2063 return retval;
2166} 2064}
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ea0ee81cff53..3d7d705f026f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -253,7 +253,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
253 /* move myself to free_list */ 253 /* move myself to free_list */
254 list_move(&desc->desc_node, &atchan->free_list); 254 list_move(&desc->desc_node, &atchan->free_list);
255 255
256 /* unmap dma addresses */ 256 /* unmap dma addresses (not on slave channels) */
257 if (!atchan->chan_common.private) { 257 if (!atchan->chan_common.private) {
258 struct device *parent = chan2parent(&atchan->chan_common); 258 struct device *parent = chan2parent(&atchan->chan_common);
259 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 259 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -583,7 +583,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
583 desc->lli.ctrlb = ctrlb; 583 desc->lli.ctrlb = ctrlb;
584 584
585 desc->txd.cookie = 0; 585 desc->txd.cookie = 0;
586 async_tx_ack(&desc->txd);
587 586
588 if (!first) { 587 if (!first) {
589 first = desc; 588 first = desc;
@@ -604,7 +603,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
604 /* set end-of-link to the last link descriptor of list*/ 603 /* set end-of-link to the last link descriptor of list*/
605 set_desc_eol(desc); 604 set_desc_eol(desc);
606 605
607 desc->txd.flags = flags; /* client is in control of this ack */ 606 first->txd.flags = flags; /* client is in control of this ack */
608 607
609 return &first->txd; 608 return &first->txd;
610 609
@@ -670,7 +669,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
670 if (!desc) 669 if (!desc)
671 goto err_desc_get; 670 goto err_desc_get;
672 671
673 mem = sg_phys(sg); 672 mem = sg_dma_address(sg);
674 len = sg_dma_len(sg); 673 len = sg_dma_len(sg);
675 mem_width = 2; 674 mem_width = 2;
676 if (unlikely(mem & 3 || len & 3)) 675 if (unlikely(mem & 3 || len & 3))
@@ -712,7 +711,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
712 if (!desc) 711 if (!desc)
713 goto err_desc_get; 712 goto err_desc_get;
714 713
715 mem = sg_phys(sg); 714 mem = sg_dma_address(sg);
716 len = sg_dma_len(sg); 715 len = sg_dma_len(sg);
717 mem_width = 2; 716 mem_width = 2;
718 if (unlikely(mem & 3 || len & 3)) 717 if (unlikely(mem & 3 || len & 3))
@@ -749,8 +748,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
749 first->txd.cookie = -EBUSY; 748 first->txd.cookie = -EBUSY;
750 first->len = total_len; 749 first->len = total_len;
751 750
752 /* last link descriptor of list is responsible of flags */ 751 /* first link descriptor of list is responsible of flags */
753 prev->txd.flags = flags; /* client is in control of this ack */ 752 first->txd.flags = flags; /* client is in control of this ack */
754 753
755 return &first->txd; 754 return &first->txd;
756 755
@@ -854,11 +853,11 @@ static void atc_issue_pending(struct dma_chan *chan)
854 853
855 dev_vdbg(chan2dev(chan), "issue_pending\n"); 854 dev_vdbg(chan2dev(chan), "issue_pending\n");
856 855
856 spin_lock_bh(&atchan->lock);
857 if (!atc_chan_is_enabled(atchan)) { 857 if (!atc_chan_is_enabled(atchan)) {
858 spin_lock_bh(&atchan->lock);
859 atc_advance_work(atchan); 858 atc_advance_work(atchan);
860 spin_unlock_bh(&atchan->lock);
861 } 859 }
860 spin_unlock_bh(&atchan->lock);
862} 861}
863 862
864/** 863/**
@@ -1210,7 +1209,7 @@ static int __init at_dma_init(void)
1210{ 1209{
1211 return platform_driver_probe(&at_dma_driver, at_dma_probe); 1210 return platform_driver_probe(&at_dma_driver, at_dma_probe);
1212} 1211}
1213module_init(at_dma_init); 1212subsys_initcall(at_dma_init);
1214 1213
1215static void __exit at_dma_exit(void) 1214static void __exit at_dma_exit(void)
1216{ 1215{
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e5e172d21692..4de947a450fc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Freescale MPC85xx, MPC83xx DMA Engine support 2 * Freescale MPC85xx, MPC83xx DMA Engine support
3 * 3 *
4 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
5 * 5 *
6 * Author: 6 * Author:
7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
@@ -1324,6 +1324,8 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
1324 fdev->common.device_control = fsl_dma_device_control; 1324 fdev->common.device_control = fsl_dma_device_control;
1325 fdev->common.dev = &op->dev; 1325 fdev->common.dev = &op->dev;
1326 1326
1327 dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1328
1327 dev_set_drvdata(&op->dev, fdev); 1329 dev_set_drvdata(&op->dev, fdev);
1328 1330
1329 /* 1331 /*
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 78266382797e..798f46a4590d 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -664,11 +664,20 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
664 /*calculate CTL_LO*/ 664 /*calculate CTL_LO*/
665 ctl_lo.ctl_lo = 0; 665 ctl_lo.ctl_lo = 0;
666 ctl_lo.ctlx.int_en = 1; 666 ctl_lo.ctlx.int_en = 1;
667 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
668 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
669 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; 667 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
670 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; 668 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
671 669
670 /*
671 * Here we need some translation from "enum dma_slave_buswidth"
672 * to the format for our dma controller
673 * standard intel_mid_dmac's format
674 * 1 Byte 0b000
675 * 2 Bytes 0b001
676 * 4 Bytes 0b010
677 */
678 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
679 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
680
672 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 681 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
673 ctl_lo.ctlx.tt_fc = 0; 682 ctl_lo.ctlx.tt_fc = 0;
674 ctl_lo.ctlx.sinc = 0; 683 ctl_lo.ctlx.sinc = 0;
@@ -746,8 +755,18 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
746 BUG_ON(!mids); 755 BUG_ON(!mids);
747 756
748 if (!midc->dma->pimr_mask) { 757 if (!midc->dma->pimr_mask) {
749 pr_debug("MDMA: SG list is not supported by this controller\n"); 758 /* We can still handle sg list with only one item */
750 return NULL; 759 if (sg_len == 1) {
760 txd = intel_mid_dma_prep_memcpy(chan,
761 mids->dma_slave.dst_addr,
762 mids->dma_slave.src_addr,
763 sgl->length,
764 flags);
765 return txd;
766 } else {
767 pr_warn("MDMA: SG list is not supported by this controller\n");
768 return NULL;
769 }
751 } 770 }
752 771
753 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 772 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
@@ -758,6 +777,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
758 pr_err("MDMA: Prep memcpy failed\n"); 777 pr_err("MDMA: Prep memcpy failed\n");
759 return NULL; 778 return NULL;
760 } 779 }
780
761 desc = to_intel_mid_dma_desc(txd); 781 desc = to_intel_mid_dma_desc(txd);
762 desc->dirn = direction; 782 desc->dirn = direction;
763 ctl_lo.ctl_lo = desc->ctl_lo; 783 ctl_lo.ctl_lo = desc->ctl_lo;
@@ -1021,11 +1041,6 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
1021 1041
1022 /*DMA Interrupt*/ 1042 /*DMA Interrupt*/
1023 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 1043 pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
1024 if (!mid) {
1025 pr_err("ERR_MDMA:null pointer mid\n");
1026 return -EINVAL;
1027 }
1028
1029 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 1044 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
1030 tfr_status &= mid->intr_mask; 1045 tfr_status &= mid->intr_mask;
1031 if (tfr_status) { 1046 if (tfr_status) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 161c452923b8..c6b01f535b29 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1261,7 +1261,7 @@ out:
1261 return err; 1261 return err;
1262} 1262}
1263 1263
1264#ifdef CONFIG_MD_RAID6_PQ 1264#ifdef CONFIG_RAID6_PQ
1265static int __devinit 1265static int __devinit
1266iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) 1266iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1267{ 1267{
@@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1584 1584
1585 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && 1585 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1586 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { 1586 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1587 #ifdef CONFIG_MD_RAID6_PQ 1587 #ifdef CONFIG_RAID6_PQ
1588 ret = iop_adma_pq_zero_sum_self_test(adev); 1588 ret = iop_adma_pq_zero_sum_self_test(adev);
1589 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); 1589 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1590 #else 1590 #else
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c064c89420d0..1c38418ae61f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Topcliff PCH DMA controller driver 2 * Topcliff PCH DMA controller driver
3 * Copyright (c) 2010 Intel Corporation 3 * Copyright (c) 2010 Intel Corporation
4 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -921,12 +922,19 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
921} 922}
922 923
923/* PCI Device ID of DMA device */ 924/* PCI Device ID of DMA device */
924#define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810 925#define PCI_VENDOR_ID_ROHM 0x10DB
925#define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815 926#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
927#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
928#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
929#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
930#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
926 931
927static const struct pci_device_id pch_dma_id_table[] = { 932static const struct pci_device_id pch_dma_id_table[] = {
928 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 }, 933 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
929 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 }, 934 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
935 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
936 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
937 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
930 { 0, }, 938 { 0, },
931}; 939};
932 940
@@ -954,6 +962,7 @@ static void __exit pch_dma_exit(void)
954module_init(pch_dma_init); 962module_init(pch_dma_init);
955module_exit(pch_dma_exit); 963module_exit(pch_dma_exit);
956 964
957MODULE_DESCRIPTION("Topcliff PCH DMA controller driver"); 965MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
966 "DMA controller driver");
958MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); 967MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
959MODULE_LICENSE("GPL v2"); 968MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index fab68a553205..6e1d46a65d0e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson SA 2007-2010 2 * Copyright (C) Ericsson AB 2007-2008
3 * Copyright (C) ST-Ericsson SA 2008-2010
3 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson 4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson 5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2 6 * License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
554 return d; 555 return d;
555} 556}
556 557
557/* Support functions for logical channels */ 558static int d40_psize_2_burst_size(bool is_log, int psize)
559{
560 if (is_log) {
561 if (psize == STEDMA40_PSIZE_LOG_1)
562 return 1;
563 } else {
564 if (psize == STEDMA40_PSIZE_PHY_1)
565 return 1;
566 }
567
568 return 2 << psize;
569}
570
571/*
572 * The dma only supports transmitting packages up to
573 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
574 * dma elements required to send the entire sg list
575 */
576static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
577{
578 int dmalen;
579 u32 max_w = max(data_width1, data_width2);
580 u32 min_w = min(data_width1, data_width2);
581 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
582
583 if (seg_max > STEDMA40_MAX_SEG_SIZE)
584 seg_max -= (1 << max_w);
585
586 if (!IS_ALIGNED(size, 1 << max_w))
587 return -EINVAL;
588
589 if (size <= seg_max)
590 dmalen = 1;
591 else {
592 dmalen = size / seg_max;
593 if (dmalen * seg_max < size)
594 dmalen++;
595 }
596 return dmalen;
597}
598
599static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
600 u32 data_width1, u32 data_width2)
601{
602 struct scatterlist *sg;
603 int i;
604 int len = 0;
605 int ret;
606
607 for_each_sg(sgl, sg, sg_len, i) {
608 ret = d40_size_2_dmalen(sg_dma_len(sg),
609 data_width1, data_width2);
610 if (ret < 0)
611 return ret;
612 len += ret;
613 }
614 return len;
615}
558 616
617/* Support functions for logical channels */
559 618
560static int d40_channel_execute_command(struct d40_chan *d40c, 619static int d40_channel_execute_command(struct d40_chan *d40c,
561 enum d40_command command) 620 enum d40_command command)
@@ -1241,6 +1300,21 @@ static int d40_validate_conf(struct d40_chan *d40c,
1241 res = -EINVAL; 1300 res = -EINVAL;
1242 } 1301 }
1243 1302
1303 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1304 (1 << conf->src_info.data_width) !=
1305 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1306 (1 << conf->dst_info.data_width)) {
1307 /*
1308 * The DMAC hardware only supports
1309 * src (burst x width) == dst (burst x width)
1310 */
1311
1312 dev_err(&d40c->chan.dev->device,
1313 "[%s] src (burst x width) != dst (burst x width)\n",
1314 __func__);
1315 res = -EINVAL;
1316 }
1317
1244 return res; 1318 return res;
1245} 1319}
1246 1320
@@ -1638,13 +1712,21 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1638 if (d40d == NULL) 1712 if (d40d == NULL)
1639 goto err; 1713 goto err;
1640 1714
1641 d40d->lli_len = sgl_len; 1715 d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
1716 d40c->dma_cfg.src_info.data_width,
1717 d40c->dma_cfg.dst_info.data_width);
1718 if (d40d->lli_len < 0) {
1719 dev_err(&d40c->chan.dev->device,
1720 "[%s] Unaligned size\n", __func__);
1721 goto err;
1722 }
1723
1642 d40d->lli_current = 0; 1724 d40d->lli_current = 0;
1643 d40d->txd.flags = dma_flags; 1725 d40d->txd.flags = dma_flags;
1644 1726
1645 if (d40c->log_num != D40_PHY_CHAN) { 1727 if (d40c->log_num != D40_PHY_CHAN) {
1646 1728
1647 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { 1729 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1648 dev_err(&d40c->chan.dev->device, 1730 dev_err(&d40c->chan.dev->device,
1649 "[%s] Out of memory\n", __func__); 1731 "[%s] Out of memory\n", __func__);
1650 goto err; 1732 goto err;
@@ -1654,15 +1736,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1654 sgl_len, 1736 sgl_len,
1655 d40d->lli_log.src, 1737 d40d->lli_log.src,
1656 d40c->log_def.lcsp1, 1738 d40c->log_def.lcsp1,
1657 d40c->dma_cfg.src_info.data_width); 1739 d40c->dma_cfg.src_info.data_width,
1740 d40c->dma_cfg.dst_info.data_width);
1658 1741
1659 (void) d40_log_sg_to_lli(sgl_dst, 1742 (void) d40_log_sg_to_lli(sgl_dst,
1660 sgl_len, 1743 sgl_len,
1661 d40d->lli_log.dst, 1744 d40d->lli_log.dst,
1662 d40c->log_def.lcsp3, 1745 d40c->log_def.lcsp3,
1663 d40c->dma_cfg.dst_info.data_width); 1746 d40c->dma_cfg.dst_info.data_width,
1747 d40c->dma_cfg.src_info.data_width);
1664 } else { 1748 } else {
1665 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1749 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1666 dev_err(&d40c->chan.dev->device, 1750 dev_err(&d40c->chan.dev->device,
1667 "[%s] Out of memory\n", __func__); 1751 "[%s] Out of memory\n", __func__);
1668 goto err; 1752 goto err;
@@ -1675,6 +1759,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1675 virt_to_phys(d40d->lli_phy.src), 1759 virt_to_phys(d40d->lli_phy.src),
1676 d40c->src_def_cfg, 1760 d40c->src_def_cfg,
1677 d40c->dma_cfg.src_info.data_width, 1761 d40c->dma_cfg.src_info.data_width,
1762 d40c->dma_cfg.dst_info.data_width,
1678 d40c->dma_cfg.src_info.psize); 1763 d40c->dma_cfg.src_info.psize);
1679 1764
1680 if (res < 0) 1765 if (res < 0)
@@ -1687,6 +1772,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1687 virt_to_phys(d40d->lli_phy.dst), 1772 virt_to_phys(d40d->lli_phy.dst),
1688 d40c->dst_def_cfg, 1773 d40c->dst_def_cfg,
1689 d40c->dma_cfg.dst_info.data_width, 1774 d40c->dma_cfg.dst_info.data_width,
1775 d40c->dma_cfg.src_info.data_width,
1690 d40c->dma_cfg.dst_info.psize); 1776 d40c->dma_cfg.dst_info.psize);
1691 1777
1692 if (res < 0) 1778 if (res < 0)
@@ -1826,7 +1912,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1826 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1912 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1827 chan); 1913 chan);
1828 unsigned long flags; 1914 unsigned long flags;
1829 int err = 0;
1830 1915
1831 if (d40c->phy_chan == NULL) { 1916 if (d40c->phy_chan == NULL) {
1832 dev_err(&d40c->chan.dev->device, 1917 dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1844 } 1929 }
1845 1930
1846 d40d->txd.flags = dma_flags; 1931 d40d->txd.flags = dma_flags;
1932 d40d->lli_len = d40_size_2_dmalen(size,
1933 d40c->dma_cfg.src_info.data_width,
1934 d40c->dma_cfg.dst_info.data_width);
1935 if (d40d->lli_len < 0) {
1936 dev_err(&d40c->chan.dev->device,
1937 "[%s] Unaligned size\n", __func__);
1938 goto err;
1939 }
1940
1847 1941
1848 dma_async_tx_descriptor_init(&d40d->txd, chan); 1942 dma_async_tx_descriptor_init(&d40d->txd, chan);
1849 1943
@@ -1851,37 +1945,40 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1851 1945
1852 if (d40c->log_num != D40_PHY_CHAN) { 1946 if (d40c->log_num != D40_PHY_CHAN) {
1853 1947
1854 if (d40_pool_lli_alloc(d40d, 1, true) < 0) { 1948 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1855 dev_err(&d40c->chan.dev->device, 1949 dev_err(&d40c->chan.dev->device,
1856 "[%s] Out of memory\n", __func__); 1950 "[%s] Out of memory\n", __func__);
1857 goto err; 1951 goto err;
1858 } 1952 }
1859 d40d->lli_len = 1;
1860 d40d->lli_current = 0; 1953 d40d->lli_current = 0;
1861 1954
1862 d40_log_fill_lli(d40d->lli_log.src, 1955 if (d40_log_buf_to_lli(d40d->lli_log.src,
1863 src, 1956 src,
1864 size, 1957 size,
1865 d40c->log_def.lcsp1, 1958 d40c->log_def.lcsp1,
1866 d40c->dma_cfg.src_info.data_width, 1959 d40c->dma_cfg.src_info.data_width,
1867 true); 1960 d40c->dma_cfg.dst_info.data_width,
1961 true) == NULL)
1962 goto err;
1868 1963
1869 d40_log_fill_lli(d40d->lli_log.dst, 1964 if (d40_log_buf_to_lli(d40d->lli_log.dst,
1870 dst, 1965 dst,
1871 size, 1966 size,
1872 d40c->log_def.lcsp3, 1967 d40c->log_def.lcsp3,
1873 d40c->dma_cfg.dst_info.data_width, 1968 d40c->dma_cfg.dst_info.data_width,
1874 true); 1969 d40c->dma_cfg.src_info.data_width,
1970 true) == NULL)
1971 goto err;
1875 1972
1876 } else { 1973 } else {
1877 1974
1878 if (d40_pool_lli_alloc(d40d, 1, false) < 0) { 1975 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1879 dev_err(&d40c->chan.dev->device, 1976 dev_err(&d40c->chan.dev->device,
1880 "[%s] Out of memory\n", __func__); 1977 "[%s] Out of memory\n", __func__);
1881 goto err; 1978 goto err;
1882 } 1979 }
1883 1980
1884 err = d40_phy_fill_lli(d40d->lli_phy.src, 1981 if (d40_phy_buf_to_lli(d40d->lli_phy.src,
1885 src, 1982 src,
1886 size, 1983 size,
1887 d40c->dma_cfg.src_info.psize, 1984 d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1889 d40c->src_def_cfg, 1986 d40c->src_def_cfg,
1890 true, 1987 true,
1891 d40c->dma_cfg.src_info.data_width, 1988 d40c->dma_cfg.src_info.data_width,
1892 false); 1989 d40c->dma_cfg.dst_info.data_width,
1893 if (err) 1990 false) == NULL)
1894 goto err_fill_lli; 1991 goto err;
1895 1992
1896 err = d40_phy_fill_lli(d40d->lli_phy.dst, 1993 if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
1897 dst, 1994 dst,
1898 size, 1995 size,
1899 d40c->dma_cfg.dst_info.psize, 1996 d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1901 d40c->dst_def_cfg, 1998 d40c->dst_def_cfg,
1902 true, 1999 true,
1903 d40c->dma_cfg.dst_info.data_width, 2000 d40c->dma_cfg.dst_info.data_width,
1904 false); 2001 d40c->dma_cfg.src_info.data_width,
1905 2002 false) == NULL)
1906 if (err) 2003 goto err;
1907 goto err_fill_lli;
1908 2004
1909 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 2005 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1910 d40d->lli_pool.size, DMA_TO_DEVICE); 2006 d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1913 spin_unlock_irqrestore(&d40c->lock, flags); 2009 spin_unlock_irqrestore(&d40c->lock, flags);
1914 return &d40d->txd; 2010 return &d40d->txd;
1915 2011
1916err_fill_lli:
1917 dev_err(&d40c->chan.dev->device,
1918 "[%s] Failed filling in PHY LLI\n", __func__);
1919err: 2012err:
1920 if (d40d) 2013 if (d40d)
1921 d40_desc_free(d40c, d40d); 2014 d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1945 dma_addr_t dev_addr = 0; 2038 dma_addr_t dev_addr = 0;
1946 int total_size; 2039 int total_size;
1947 2040
1948 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { 2041 d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
2042 d40c->dma_cfg.src_info.data_width,
2043 d40c->dma_cfg.dst_info.data_width);
2044 if (d40d->lli_len < 0) {
2045 dev_err(&d40c->chan.dev->device,
2046 "[%s] Unaligned size\n", __func__);
2047 return -EINVAL;
2048 }
2049
2050 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1949 dev_err(&d40c->chan.dev->device, 2051 dev_err(&d40c->chan.dev->device,
1950 "[%s] Out of memory\n", __func__); 2052 "[%s] Out of memory\n", __func__);
1951 return -ENOMEM; 2053 return -ENOMEM;
1952 } 2054 }
1953 2055
1954 d40d->lli_len = sg_len;
1955 d40d->lli_current = 0; 2056 d40d->lli_current = 0;
1956 2057
1957 if (direction == DMA_FROM_DEVICE) 2058 if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1993 dma_addr_t dst_dev_addr; 2094 dma_addr_t dst_dev_addr;
1994 int res; 2095 int res;
1995 2096
1996 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 2097 d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
2098 d40c->dma_cfg.src_info.data_width,
2099 d40c->dma_cfg.dst_info.data_width);
2100 if (d40d->lli_len < 0) {
2101 dev_err(&d40c->chan.dev->device,
2102 "[%s] Unaligned size\n", __func__);
2103 return -EINVAL;
2104 }
2105
2106 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1997 dev_err(&d40c->chan.dev->device, 2107 dev_err(&d40c->chan.dev->device,
1998 "[%s] Out of memory\n", __func__); 2108 "[%s] Out of memory\n", __func__);
1999 return -ENOMEM; 2109 return -ENOMEM;
2000 } 2110 }
2001 2111
2002 d40d->lli_len = sgl_len;
2003 d40d->lli_current = 0; 2112 d40d->lli_current = 0;
2004 2113
2005 if (direction == DMA_FROM_DEVICE) { 2114 if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2024 virt_to_phys(d40d->lli_phy.src), 2133 virt_to_phys(d40d->lli_phy.src),
2025 d40c->src_def_cfg, 2134 d40c->src_def_cfg,
2026 d40c->dma_cfg.src_info.data_width, 2135 d40c->dma_cfg.src_info.data_width,
2136 d40c->dma_cfg.dst_info.data_width,
2027 d40c->dma_cfg.src_info.psize); 2137 d40c->dma_cfg.src_info.psize);
2028 if (res < 0) 2138 if (res < 0)
2029 return res; 2139 return res;
@@ -2035,6 +2145,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2035 virt_to_phys(d40d->lli_phy.dst), 2145 virt_to_phys(d40d->lli_phy.dst),
2036 d40c->dst_def_cfg, 2146 d40c->dst_def_cfg,
2037 d40c->dma_cfg.dst_info.data_width, 2147 d40c->dma_cfg.dst_info.data_width,
2148 d40c->dma_cfg.src_info.data_width,
2038 d40c->dma_cfg.dst_info.psize); 2149 d40c->dma_cfg.dst_info.psize);
2039 if (res < 0) 2150 if (res < 0)
2040 return res; 2151 return res;
@@ -2244,6 +2355,8 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2244 psize = STEDMA40_PSIZE_PHY_8; 2355 psize = STEDMA40_PSIZE_PHY_8;
2245 else if (config_maxburst >= 4) 2356 else if (config_maxburst >= 4)
2246 psize = STEDMA40_PSIZE_PHY_4; 2357 psize = STEDMA40_PSIZE_PHY_4;
2358 else if (config_maxburst >= 2)
2359 psize = STEDMA40_PSIZE_PHY_2;
2247 else 2360 else
2248 psize = STEDMA40_PSIZE_PHY_1; 2361 psize = STEDMA40_PSIZE_PHY_1;
2249 } 2362 }
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 8557cb88b255..0b096a38322d 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) ST-Ericsson SA 2007-2010 2 * Copyright (C) ST-Ericsson SA 2007-2010
3 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson 3 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson 4 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 */ 6 */
@@ -122,15 +122,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
122 *dst_cfg = dst; 122 *dst_cfg = dst;
123} 123}
124 124
125int d40_phy_fill_lli(struct d40_phy_lli *lli, 125static int d40_phy_fill_lli(struct d40_phy_lli *lli,
126 dma_addr_t data, 126 dma_addr_t data,
127 u32 data_size, 127 u32 data_size,
128 int psize, 128 int psize,
129 dma_addr_t next_lli, 129 dma_addr_t next_lli,
130 u32 reg_cfg, 130 u32 reg_cfg,
131 bool term_int, 131 bool term_int,
132 u32 data_width, 132 u32 data_width,
133 bool is_device) 133 bool is_device)
134{ 134{
135 int num_elems; 135 int num_elems;
136 136
@@ -139,13 +139,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
139 else 139 else
140 num_elems = 2 << psize; 140 num_elems = 2 << psize;
141 141
142 /*
143 * Size is 16bit. data_width is 8, 16, 32 or 64 bit
144 * Block large than 64 KiB must be split.
145 */
146 if (data_size > (0xffff << data_width))
147 return -EINVAL;
148
149 /* Must be aligned */ 142 /* Must be aligned */
150 if (!IS_ALIGNED(data, 0x1 << data_width)) 143 if (!IS_ALIGNED(data, 0x1 << data_width))
151 return -EINVAL; 144 return -EINVAL;
@@ -187,55 +180,118 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
187 return 0; 180 return 0;
188} 181}
189 182
183static int d40_seg_size(int size, int data_width1, int data_width2)
184{
185 u32 max_w = max(data_width1, data_width2);
186 u32 min_w = min(data_width1, data_width2);
187 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
188
189 if (seg_max > STEDMA40_MAX_SEG_SIZE)
190 seg_max -= (1 << max_w);
191
192 if (size <= seg_max)
193 return size;
194
195 if (size <= 2 * seg_max)
196 return ALIGN(size / 2, 1 << max_w);
197
198 return seg_max;
199}
200
201struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
202 dma_addr_t addr,
203 u32 size,
204 int psize,
205 dma_addr_t lli_phys,
206 u32 reg_cfg,
207 bool term_int,
208 u32 data_width1,
209 u32 data_width2,
210 bool is_device)
211{
212 int err;
213 dma_addr_t next = lli_phys;
214 int size_rest = size;
215 int size_seg = 0;
216
217 do {
218 size_seg = d40_seg_size(size_rest, data_width1, data_width2);
219 size_rest -= size_seg;
220
221 if (term_int && size_rest == 0)
222 next = 0;
223 else
224 next = ALIGN(next + sizeof(struct d40_phy_lli),
225 D40_LLI_ALIGN);
226
227 err = d40_phy_fill_lli(lli,
228 addr,
229 size_seg,
230 psize,
231 next,
232 reg_cfg,
233 !next,
234 data_width1,
235 is_device);
236
237 if (err)
238 goto err;
239
240 lli++;
241 if (!is_device)
242 addr += size_seg;
243 } while (size_rest);
244
245 return lli;
246
247 err:
248 return NULL;
249}
250
190int d40_phy_sg_to_lli(struct scatterlist *sg, 251int d40_phy_sg_to_lli(struct scatterlist *sg,
191 int sg_len, 252 int sg_len,
192 dma_addr_t target, 253 dma_addr_t target,
193 struct d40_phy_lli *lli, 254 struct d40_phy_lli *lli_sg,
194 dma_addr_t lli_phys, 255 dma_addr_t lli_phys,
195 u32 reg_cfg, 256 u32 reg_cfg,
196 u32 data_width, 257 u32 data_width1,
258 u32 data_width2,
197 int psize) 259 int psize)
198{ 260{
199 int total_size = 0; 261 int total_size = 0;
200 int i; 262 int i;
201 struct scatterlist *current_sg = sg; 263 struct scatterlist *current_sg = sg;
202 dma_addr_t next_lli_phys;
203 dma_addr_t dst; 264 dma_addr_t dst;
204 int err = 0; 265 struct d40_phy_lli *lli = lli_sg;
266 dma_addr_t l_phys = lli_phys;
205 267
206 for_each_sg(sg, current_sg, sg_len, i) { 268 for_each_sg(sg, current_sg, sg_len, i) {
207 269
208 total_size += sg_dma_len(current_sg); 270 total_size += sg_dma_len(current_sg);
209 271
210 /* If this scatter list entry is the last one, no next link */
211 if (sg_len - 1 == i)
212 next_lli_phys = 0;
213 else
214 next_lli_phys = ALIGN(lli_phys + (i + 1) *
215 sizeof(struct d40_phy_lli),
216 D40_LLI_ALIGN);
217
218 if (target) 272 if (target)
219 dst = target; 273 dst = target;
220 else 274 else
221 dst = sg_phys(current_sg); 275 dst = sg_phys(current_sg);
222 276
223 err = d40_phy_fill_lli(&lli[i], 277 l_phys = ALIGN(lli_phys + (lli - lli_sg) *
224 dst, 278 sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
225 sg_dma_len(current_sg), 279
226 psize, 280 lli = d40_phy_buf_to_lli(lli,
227 next_lli_phys, 281 dst,
228 reg_cfg, 282 sg_dma_len(current_sg),
229 !next_lli_phys, 283 psize,
230 data_width, 284 l_phys,
231 target == dst); 285 reg_cfg,
232 if (err) 286 sg_len - 1 == i,
233 goto err; 287 data_width1,
288 data_width2,
289 target == dst);
290 if (lli == NULL)
291 return -EINVAL;
234 } 292 }
235 293
236 return total_size; 294 return total_size;
237err:
238 return err;
239} 295}
240 296
241 297
@@ -315,17 +371,20 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
315 writel(lli_dst->lcsp13, &lcla[1].lcsp13); 371 writel(lli_dst->lcsp13, &lcla[1].lcsp13);
316} 372}
317 373
318void d40_log_fill_lli(struct d40_log_lli *lli, 374static void d40_log_fill_lli(struct d40_log_lli *lli,
319 dma_addr_t data, u32 data_size, 375 dma_addr_t data, u32 data_size,
320 u32 reg_cfg, 376 u32 reg_cfg,
321 u32 data_width, 377 u32 data_width,
322 bool addr_inc) 378 bool addr_inc)
323{ 379{
324 lli->lcsp13 = reg_cfg; 380 lli->lcsp13 = reg_cfg;
325 381
326 /* The number of elements to transfer */ 382 /* The number of elements to transfer */
327 lli->lcsp02 = ((data_size >> data_width) << 383 lli->lcsp02 = ((data_size >> data_width) <<
328 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; 384 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
385
386 BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
387
329 /* 16 LSBs address of the current element */ 388 /* 16 LSBs address of the current element */
330 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; 389 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
331 /* 16 MSBs address of the current element */ 390 /* 16 MSBs address of the current element */
@@ -348,55 +407,94 @@ int d40_log_sg_to_dev(struct scatterlist *sg,
348 int total_size = 0; 407 int total_size = 0;
349 struct scatterlist *current_sg = sg; 408 struct scatterlist *current_sg = sg;
350 int i; 409 int i;
410 struct d40_log_lli *lli_src = lli->src;
411 struct d40_log_lli *lli_dst = lli->dst;
351 412
352 for_each_sg(sg, current_sg, sg_len, i) { 413 for_each_sg(sg, current_sg, sg_len, i) {
353 total_size += sg_dma_len(current_sg); 414 total_size += sg_dma_len(current_sg);
354 415
355 if (direction == DMA_TO_DEVICE) { 416 if (direction == DMA_TO_DEVICE) {
356 d40_log_fill_lli(&lli->src[i], 417 lli_src =
357 sg_phys(current_sg), 418 d40_log_buf_to_lli(lli_src,
358 sg_dma_len(current_sg), 419 sg_phys(current_sg),
359 lcsp->lcsp1, src_data_width, 420 sg_dma_len(current_sg),
360 true); 421 lcsp->lcsp1, src_data_width,
361 d40_log_fill_lli(&lli->dst[i], 422 dst_data_width,
362 dev_addr, 423 true);
363 sg_dma_len(current_sg), 424 lli_dst =
364 lcsp->lcsp3, dst_data_width, 425 d40_log_buf_to_lli(lli_dst,
365 false); 426 dev_addr,
427 sg_dma_len(current_sg),
428 lcsp->lcsp3, dst_data_width,
429 src_data_width,
430 false);
366 } else { 431 } else {
367 d40_log_fill_lli(&lli->dst[i], 432 lli_dst =
368 sg_phys(current_sg), 433 d40_log_buf_to_lli(lli_dst,
369 sg_dma_len(current_sg), 434 sg_phys(current_sg),
370 lcsp->lcsp3, dst_data_width, 435 sg_dma_len(current_sg),
371 true); 436 lcsp->lcsp3, dst_data_width,
372 d40_log_fill_lli(&lli->src[i], 437 src_data_width,
373 dev_addr, 438 true);
374 sg_dma_len(current_sg), 439 lli_src =
375 lcsp->lcsp1, src_data_width, 440 d40_log_buf_to_lli(lli_src,
376 false); 441 dev_addr,
442 sg_dma_len(current_sg),
443 lcsp->lcsp1, src_data_width,
444 dst_data_width,
445 false);
377 } 446 }
378 } 447 }
379 return total_size; 448 return total_size;
380} 449}
381 450
451struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
452 dma_addr_t addr,
453 int size,
454 u32 lcsp13, /* src or dst*/
455 u32 data_width1,
456 u32 data_width2,
457 bool addr_inc)
458{
459 struct d40_log_lli *lli = lli_sg;
460 int size_rest = size;
461 int size_seg = 0;
462
463 do {
464 size_seg = d40_seg_size(size_rest, data_width1, data_width2);
465 size_rest -= size_seg;
466
467 d40_log_fill_lli(lli,
468 addr,
469 size_seg,
470 lcsp13, data_width1,
471 addr_inc);
472 if (addr_inc)
473 addr += size_seg;
474 lli++;
475 } while (size_rest);
476
477 return lli;
478}
479
382int d40_log_sg_to_lli(struct scatterlist *sg, 480int d40_log_sg_to_lli(struct scatterlist *sg,
383 int sg_len, 481 int sg_len,
384 struct d40_log_lli *lli_sg, 482 struct d40_log_lli *lli_sg,
385 u32 lcsp13, /* src or dst*/ 483 u32 lcsp13, /* src or dst*/
386 u32 data_width) 484 u32 data_width1, u32 data_width2)
387{ 485{
388 int total_size = 0; 486 int total_size = 0;
389 struct scatterlist *current_sg = sg; 487 struct scatterlist *current_sg = sg;
390 int i; 488 int i;
489 struct d40_log_lli *lli = lli_sg;
391 490
392 for_each_sg(sg, current_sg, sg_len, i) { 491 for_each_sg(sg, current_sg, sg_len, i) {
393 total_size += sg_dma_len(current_sg); 492 total_size += sg_dma_len(current_sg);
394 493 lli = d40_log_buf_to_lli(lli,
395 d40_log_fill_lli(&lli_sg[i], 494 sg_phys(current_sg),
396 sg_phys(current_sg), 495 sg_dma_len(current_sg),
397 sg_dma_len(current_sg), 496 lcsp13,
398 lcsp13, data_width, 497 data_width1, data_width2, true);
399 true);
400 } 498 }
401 return total_size; 499 return total_size;
402} 500}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9e419b907544..9cc43495bea2 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -292,18 +292,20 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
292 struct d40_phy_lli *lli, 292 struct d40_phy_lli *lli,
293 dma_addr_t lli_phys, 293 dma_addr_t lli_phys,
294 u32 reg_cfg, 294 u32 reg_cfg,
295 u32 data_width, 295 u32 data_width1,
296 u32 data_width2,
296 int psize); 297 int psize);
297 298
298int d40_phy_fill_lli(struct d40_phy_lli *lli, 299struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
299 dma_addr_t data, 300 dma_addr_t data,
300 u32 data_size, 301 u32 data_size,
301 int psize, 302 int psize,
302 dma_addr_t next_lli, 303 dma_addr_t next_lli,
303 u32 reg_cfg, 304 u32 reg_cfg,
304 bool term_int, 305 bool term_int,
305 u32 data_width, 306 u32 data_width1,
306 bool is_device); 307 u32 data_width2,
308 bool is_device);
307 309
308void d40_phy_lli_write(void __iomem *virtbase, 310void d40_phy_lli_write(void __iomem *virtbase,
309 u32 phy_chan_num, 311 u32 phy_chan_num,
@@ -312,12 +314,12 @@ void d40_phy_lli_write(void __iomem *virtbase,
312 314
313/* Logical channels */ 315/* Logical channels */
314 316
315void d40_log_fill_lli(struct d40_log_lli *lli, 317struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
316 dma_addr_t data, 318 dma_addr_t addr,
317 u32 data_size, 319 int size,
318 u32 reg_cfg, 320 u32 lcsp13, /* src or dst*/
319 u32 data_width, 321 u32 data_width1, u32 data_width2,
320 bool addr_inc); 322 bool addr_inc);
321 323
322int d40_log_sg_to_dev(struct scatterlist *sg, 324int d40_log_sg_to_dev(struct scatterlist *sg,
323 int sg_len, 325 int sg_len,
@@ -332,7 +334,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
332 int sg_len, 334 int sg_len,
333 struct d40_log_lli *lli_sg, 335 struct d40_log_lli *lli_sg,
334 u32 lcsp13, /* src or dst*/ 336 u32 lcsp13, /* src or dst*/
335 u32 data_width); 337 u32 data_width1, u32 data_width2);
336 338
337void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, 339void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
338 struct d40_log_lli *lli_dst, 340 struct d40_log_lli *lli_dst,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4a5ecc58025d..23e03554f0d3 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -826,8 +826,6 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
826/* Display and decode various NB registers for debug purposes. */ 826/* Display and decode various NB registers for debug purposes. */
827static void amd64_dump_misc_regs(struct amd64_pvt *pvt) 827static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
828{ 828{
829 int ganged;
830
831 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 829 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
832 830
833 debugf1(" NB two channel DRAM capable: %s\n", 831 debugf1(" NB two channel DRAM capable: %s\n",
@@ -851,28 +849,19 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
851 debugf1(" DramHoleValid: %s\n", 849 debugf1(" DramHoleValid: %s\n",
852 (pvt->dhar & DHAR_VALID) ? "yes" : "no"); 850 (pvt->dhar & DHAR_VALID) ? "yes" : "no");
853 851
852 amd64_debug_display_dimm_sizes(0, pvt);
853
854 /* everything below this point is Fam10h and above */ 854 /* everything below this point is Fam10h and above */
855 if (boot_cpu_data.x86 == 0xf) { 855 if (boot_cpu_data.x86 == 0xf)
856 amd64_debug_display_dimm_sizes(0, pvt);
857 return; 856 return;
858 } 857
858 amd64_debug_display_dimm_sizes(1, pvt);
859 859
860 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); 860 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
861 861
862 /* Only if NOT ganged does dclr1 have valid info */ 862 /* Only if NOT ganged does dclr1 have valid info */
863 if (!dct_ganging_enabled(pvt)) 863 if (!dct_ganging_enabled(pvt))
864 amd64_dump_dramcfg_low(pvt->dclr1, 1); 864 amd64_dump_dramcfg_low(pvt->dclr1, 1);
865
866 /*
867 * Determine if ganged and then dump memory sizes for first controller,
868 * and if NOT ganged dump info for 2nd controller.
869 */
870 ganged = dct_ganging_enabled(pvt);
871
872 amd64_debug_display_dimm_sizes(0, pvt);
873
874 if (!ganged)
875 amd64_debug_display_dimm_sizes(1, pvt);
876} 865}
877 866
878/* Read in both of DBAM registers */ 867/* Read in both of DBAM registers */
@@ -1644,11 +1633,10 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1644 WARN_ON(ctrl != 0); 1633 WARN_ON(ctrl != 0);
1645 } 1634 }
1646 1635
1647 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", 1636 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1648 ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); 1637 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
1649 1638
1650 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; 1639 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1651 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1652 1640
1653 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); 1641 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1654 1642
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 68f942cb30f2..0c56989cd907 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -49,15 +49,13 @@ config FIREWIRE_SBP2
49 configuration section. 49 configuration section.
50 50
51config FIREWIRE_NET 51config FIREWIRE_NET
52 tristate "IP networking over 1394 (EXPERIMENTAL)" 52 tristate "IP networking over 1394"
53 depends on FIREWIRE && INET && EXPERIMENTAL 53 depends on FIREWIRE && INET
54 help 54 help
55 This enables IPv4 over IEEE 1394, providing IP connectivity with 55 This enables IPv4 over IEEE 1394, providing IP connectivity with
56 other implementations of RFC 2734 as found on several operating 56 other implementations of RFC 2734 as found on several operating
57 systems. Multicast support is currently limited. 57 systems. Multicast support is currently limited.
58 58
59 NOTE, this driver is not stable yet!
60
61 To compile this driver as a module, say M here: The module will be 59 To compile this driver as a module, say M here: The module will be
62 called firewire-net. 60 called firewire-net.
63 61
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index be0492398ef9..24ff35511e2b 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -75,6 +75,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
75#define BIB_IRMC ((1) << 31) 75#define BIB_IRMC ((1) << 31)
76#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */ 76#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
77 77
78#define CANON_OUI 0x000085
79
78static void generate_config_rom(struct fw_card *card, __be32 *config_rom) 80static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
79{ 81{
80 struct fw_descriptor *desc; 82 struct fw_descriptor *desc;
@@ -284,6 +286,7 @@ static void bm_work(struct work_struct *work)
284 bool root_device_is_running; 286 bool root_device_is_running;
285 bool root_device_is_cmc; 287 bool root_device_is_cmc;
286 bool irm_is_1394_1995_only; 288 bool irm_is_1394_1995_only;
289 bool keep_this_irm;
287 290
288 spin_lock_irq(&card->lock); 291 spin_lock_irq(&card->lock);
289 292
@@ -305,6 +308,10 @@ static void bm_work(struct work_struct *work)
305 irm_is_1394_1995_only = irm_device && irm_device->config_rom && 308 irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
306 (irm_device->config_rom[2] & 0x000000f0) == 0; 309 (irm_device->config_rom[2] & 0x000000f0) == 0;
307 310
311 /* Canon MV5i works unreliably if it is not root node. */
312 keep_this_irm = irm_device && irm_device->config_rom &&
313 irm_device->config_rom[3] >> 8 == CANON_OUI;
314
308 root_id = root_node->node_id; 315 root_id = root_node->node_id;
309 irm_id = card->irm_node->node_id; 316 irm_id = card->irm_node->node_id;
310 local_id = card->local_node->node_id; 317 local_id = card->local_node->node_id;
@@ -333,7 +340,7 @@ static void bm_work(struct work_struct *work)
333 goto pick_me; 340 goto pick_me;
334 } 341 }
335 342
336 if (irm_is_1394_1995_only) { 343 if (irm_is_1394_1995_only && !keep_this_irm) {
337 new_root_id = local_id; 344 new_root_id = local_id;
338 fw_notify("%s, making local node (%02x) root.\n", 345 fw_notify("%s, making local node (%02x) root.\n",
339 "IRM is not 1394a compliant", new_root_id); 346 "IRM is not 1394a compliant", new_root_id);
@@ -382,7 +389,7 @@ static void bm_work(struct work_struct *work)
382 389
383 spin_lock_irq(&card->lock); 390 spin_lock_irq(&card->lock);
384 391
385 if (rcode != RCODE_COMPLETE) { 392 if (rcode != RCODE_COMPLETE && !keep_this_irm) {
386 /* 393 /*
387 * The lock request failed, maybe the IRM 394 * The lock request failed, maybe the IRM
388 * isn't really IRM capable after all. Let's 395 * isn't really IRM capable after all. Let's
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index c2e194c58667..7ed08fd1214e 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -191,6 +191,7 @@ struct fwnet_peer {
191 struct fwnet_device *dev; 191 struct fwnet_device *dev;
192 u64 guid; 192 u64 guid;
193 u64 fifo; 193 u64 fifo;
194 __be32 ip;
194 195
195 /* guarded by dev->lock */ 196 /* guarded by dev->lock */
196 struct list_head pd_list; /* received partial datagrams */ 197 struct list_head pd_list; /* received partial datagrams */
@@ -570,6 +571,8 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
570 peer->speed = sspd; 571 peer->speed = sspd;
571 if (peer->max_payload > max_payload) 572 if (peer->max_payload > max_payload)
572 peer->max_payload = max_payload; 573 peer->max_payload = max_payload;
574
575 peer->ip = arp1394->sip;
573 } 576 }
574 spin_unlock_irqrestore(&dev->lock, flags); 577 spin_unlock_irqrestore(&dev->lock, flags);
575 578
@@ -1470,6 +1473,7 @@ static int fwnet_add_peer(struct fwnet_device *dev,
1470 peer->dev = dev; 1473 peer->dev = dev;
1471 peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; 1474 peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1472 peer->fifo = FWNET_NO_FIFO_ADDR; 1475 peer->fifo = FWNET_NO_FIFO_ADDR;
1476 peer->ip = 0;
1473 INIT_LIST_HEAD(&peer->pd_list); 1477 INIT_LIST_HEAD(&peer->pd_list);
1474 peer->pdg_size = 0; 1478 peer->pdg_size = 0;
1475 peer->datagram_label = 0; 1479 peer->datagram_label = 0;
@@ -1589,10 +1593,13 @@ static int fwnet_remove(struct device *_dev)
1589 1593
1590 mutex_lock(&fwnet_device_mutex); 1594 mutex_lock(&fwnet_device_mutex);
1591 1595
1596 net = dev->netdev;
1597 if (net && peer->ip)
1598 arp_invalidate(net, peer->ip);
1599
1592 fwnet_remove_peer(peer, dev); 1600 fwnet_remove_peer(peer, dev);
1593 1601
1594 if (list_empty(&dev->peer_list)) { 1602 if (list_empty(&dev->peer_list)) {
1595 net = dev->netdev;
1596 unregister_netdev(net); 1603 unregister_netdev(net);
1597 1604
1598 if (dev->local_fifo != FWNET_NO_FIFO_ADDR) 1605 if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index e8b6a13515bd..e710424b59ea 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -27,7 +27,7 @@ config EDD_OFF
27 using the kernel parameter 'edd={on|skipmbr|off}'. 27 using the kernel parameter 'edd={on|skipmbr|off}'.
28 28
29config FIRMWARE_MEMMAP 29config FIRMWARE_MEMMAP
30 bool "Add firmware-provided memory map to sysfs" if EMBEDDED 30 bool "Add firmware-provided memory map to sysfs" if EXPERT
31 default X86 31 default X86
32 help 32 help
33 Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap. 33 Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 815d98b2c1ba..0d05ea7d499b 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -11,14 +11,13 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/pci.h> 14#include <linux/platform_device.h>
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/cs5535.h> 17#include <linux/cs5535.h>
18#include <asm/msr.h> 18#include <asm/msr.h>
19 19
20#define DRV_NAME "cs5535-gpio" 20#define DRV_NAME "cs5535-gpio"
21#define GPIO_BAR 1
22 21
23/* 22/*
24 * Some GPIO pins 23 * Some GPIO pins
@@ -47,7 +46,7 @@ static struct cs5535_gpio_chip {
47 struct gpio_chip chip; 46 struct gpio_chip chip;
48 resource_size_t base; 47 resource_size_t base;
49 48
50 struct pci_dev *pdev; 49 struct platform_device *pdev;
51 spinlock_t lock; 50 spinlock_t lock;
52} cs5535_gpio_chip; 51} cs5535_gpio_chip;
53 52
@@ -301,10 +300,10 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
301 }, 300 },
302}; 301};
303 302
304static int __init cs5535_gpio_probe(struct pci_dev *pdev, 303static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
305 const struct pci_device_id *pci_id)
306{ 304{
307 int err; 305 struct resource *res;
306 int err = -EIO;
308 ulong mask_orig = mask; 307 ulong mask_orig = mask;
309 308
310 /* There are two ways to get the GPIO base address; one is by 309 /* There are two ways to get the GPIO base address; one is by
@@ -314,25 +313,23 @@ static int __init cs5535_gpio_probe(struct pci_dev *pdev,
314 * it turns out to be unreliable in the face of crappy BIOSes, we 313 * it turns out to be unreliable in the face of crappy BIOSes, we
315 * can always go back to using MSRs.. */ 314 * can always go back to using MSRs.. */
316 315
317 err = pci_enable_device_io(pdev); 316 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
318 if (err) { 317 if (!res) {
319 dev_err(&pdev->dev, "can't enable device IO\n"); 318 dev_err(&pdev->dev, "can't fetch device resource info\n");
320 goto done; 319 goto done;
321 } 320 }
322 321
323 err = pci_request_region(pdev, GPIO_BAR, DRV_NAME); 322 if (!request_region(res->start, resource_size(res), pdev->name)) {
324 if (err) { 323 dev_err(&pdev->dev, "can't request region\n");
325 dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
326 goto done; 324 goto done;
327 } 325 }
328 326
329 /* set up the driver-specific struct */ 327 /* set up the driver-specific struct */
330 cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR); 328 cs5535_gpio_chip.base = res->start;
331 cs5535_gpio_chip.pdev = pdev; 329 cs5535_gpio_chip.pdev = pdev;
332 spin_lock_init(&cs5535_gpio_chip.lock); 330 spin_lock_init(&cs5535_gpio_chip.lock);
333 331
334 dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR, 332 dev_info(&pdev->dev, "reserved resource region %pR\n", res);
335 (unsigned long long) cs5535_gpio_chip.base);
336 333
337 /* mask out reserved pins */ 334 /* mask out reserved pins */
338 mask &= 0x1F7FFFFF; 335 mask &= 0x1F7FFFFF;
@@ -350,78 +347,49 @@ static int __init cs5535_gpio_probe(struct pci_dev *pdev,
350 if (err) 347 if (err)
351 goto release_region; 348 goto release_region;
352 349
353 dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n"); 350 dev_info(&pdev->dev, "GPIO support successfully loaded.\n");
354 return 0; 351 return 0;
355 352
356release_region: 353release_region:
357 pci_release_region(pdev, GPIO_BAR); 354 release_region(res->start, resource_size(res));
358done: 355done:
359 return err; 356 return err;
360} 357}
361 358
362static void __exit cs5535_gpio_remove(struct pci_dev *pdev) 359static int __devexit cs5535_gpio_remove(struct platform_device *pdev)
363{ 360{
361 struct resource *r;
364 int err; 362 int err;
365 363
366 err = gpiochip_remove(&cs5535_gpio_chip.chip); 364 err = gpiochip_remove(&cs5535_gpio_chip.chip);
367 if (err) { 365 if (err) {
368 /* uhh? */ 366 /* uhh? */
369 dev_err(&pdev->dev, "unable to remove gpio_chip?\n"); 367 dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
368 return err;
370 } 369 }
371 pci_release_region(pdev, GPIO_BAR);
372}
373
374static struct pci_device_id cs5535_gpio_pci_tbl[] = {
375 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
376 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
377 { 0, },
378};
379MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl);
380 370
381/* 371 r = platform_get_resource(pdev, IORESOURCE_IO, 0);
382 * We can't use the standard PCI driver registration stuff here, since 372 release_region(r->start, resource_size(r));
383 * that allows only one driver to bind to each PCI device (and we want 373 return 0;
384 * multiple drivers to be able to bind to the device). Instead, manually
385 * scan for the PCI device, request a single region, and keep track of the
386 * devices that we're using.
387 */
388
389static int __init cs5535_gpio_scan_pci(void)
390{
391 struct pci_dev *pdev;
392 int err = -ENODEV;
393 int i;
394
395 for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) {
396 pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor,
397 cs5535_gpio_pci_tbl[i].device, NULL);
398 if (pdev) {
399 err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]);
400 if (err)
401 pci_dev_put(pdev);
402
403 /* we only support a single CS5535/6 southbridge */
404 break;
405 }
406 }
407
408 return err;
409} 374}
410 375
411static void __exit cs5535_gpio_free_pci(void) 376static struct platform_driver cs5535_gpio_drv = {
412{ 377 .driver = {
413 cs5535_gpio_remove(cs5535_gpio_chip.pdev); 378 .name = DRV_NAME,
414 pci_dev_put(cs5535_gpio_chip.pdev); 379 .owner = THIS_MODULE,
415} 380 },
381 .probe = cs5535_gpio_probe,
382 .remove = __devexit_p(cs5535_gpio_remove),
383};
416 384
417static int __init cs5535_gpio_init(void) 385static int __init cs5535_gpio_init(void)
418{ 386{
419 return cs5535_gpio_scan_pci(); 387 return platform_driver_register(&cs5535_gpio_drv);
420} 388}
421 389
422static void __exit cs5535_gpio_exit(void) 390static void __exit cs5535_gpio_exit(void)
423{ 391{
424 cs5535_gpio_free_pci(); 392 platform_driver_unregister(&cs5535_gpio_drv);
425} 393}
426 394
427module_init(cs5535_gpio_init); 395module_init(cs5535_gpio_init);
@@ -430,3 +398,4 @@ module_exit(cs5535_gpio_exit);
430MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); 398MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
431MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver"); 399MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
432MODULE_LICENSE("GPL"); 400MODULE_LICENSE("GPL");
401MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index d81cc748e77f..54d70a47afc1 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -187,7 +187,7 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
187 187
188static void lnw_irq_handler(unsigned irq, struct irq_desc *desc) 188static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
189{ 189{
190 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq); 190 struct lnw_gpio *lnw = get_irq_data(irq);
191 u32 base, gpio; 191 u32 base, gpio;
192 void __iomem *gedr; 192 void __iomem *gedr;
193 u32 gedr_v; 193 u32 gedr_v;
@@ -206,7 +206,12 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
206 /* clear the edge detect status bit */ 206 /* clear the edge detect status bit */
207 writel(gedr_v, gedr); 207 writel(gedr_v, gedr);
208 } 208 }
209 desc->chip->eoi(irq); 209
210 if (desc->chip->irq_eoi)
211 desc->chip->irq_eoi(irq_get_irq_data(irq));
212 else
213 dev_warn(lnw->chip.dev, "missing EOI handler for irq %d\n", irq);
214
210} 215}
211 216
212static int __devinit lnw_gpio_probe(struct pci_dev *pdev, 217static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index a261972f603d..b473429eee75 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -60,6 +60,7 @@ struct pca953x_chip {
60 unsigned gpio_start; 60 unsigned gpio_start;
61 uint16_t reg_output; 61 uint16_t reg_output;
62 uint16_t reg_direction; 62 uint16_t reg_direction;
63 struct mutex i2c_lock;
63 64
64#ifdef CONFIG_GPIO_PCA953X_IRQ 65#ifdef CONFIG_GPIO_PCA953X_IRQ
65 struct mutex irq_lock; 66 struct mutex irq_lock;
@@ -119,13 +120,17 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
119 120
120 chip = container_of(gc, struct pca953x_chip, gpio_chip); 121 chip = container_of(gc, struct pca953x_chip, gpio_chip);
121 122
123 mutex_lock(&chip->i2c_lock);
122 reg_val = chip->reg_direction | (1u << off); 124 reg_val = chip->reg_direction | (1u << off);
123 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); 125 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
124 if (ret) 126 if (ret)
125 return ret; 127 goto exit;
126 128
127 chip->reg_direction = reg_val; 129 chip->reg_direction = reg_val;
128 return 0; 130 ret = 0;
131exit:
132 mutex_unlock(&chip->i2c_lock);
133 return ret;
129} 134}
130 135
131static int pca953x_gpio_direction_output(struct gpio_chip *gc, 136static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -137,6 +142,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
137 142
138 chip = container_of(gc, struct pca953x_chip, gpio_chip); 143 chip = container_of(gc, struct pca953x_chip, gpio_chip);
139 144
145 mutex_lock(&chip->i2c_lock);
140 /* set output level */ 146 /* set output level */
141 if (val) 147 if (val)
142 reg_val = chip->reg_output | (1u << off); 148 reg_val = chip->reg_output | (1u << off);
@@ -145,7 +151,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
145 151
146 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); 152 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
147 if (ret) 153 if (ret)
148 return ret; 154 goto exit;
149 155
150 chip->reg_output = reg_val; 156 chip->reg_output = reg_val;
151 157
@@ -153,10 +159,13 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
153 reg_val = chip->reg_direction & ~(1u << off); 159 reg_val = chip->reg_direction & ~(1u << off);
154 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); 160 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
155 if (ret) 161 if (ret)
156 return ret; 162 goto exit;
157 163
158 chip->reg_direction = reg_val; 164 chip->reg_direction = reg_val;
159 return 0; 165 ret = 0;
166exit:
167 mutex_unlock(&chip->i2c_lock);
168 return ret;
160} 169}
161 170
162static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) 171static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@@ -167,7 +176,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
167 176
168 chip = container_of(gc, struct pca953x_chip, gpio_chip); 177 chip = container_of(gc, struct pca953x_chip, gpio_chip);
169 178
179 mutex_lock(&chip->i2c_lock);
170 ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val); 180 ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
181 mutex_unlock(&chip->i2c_lock);
171 if (ret < 0) { 182 if (ret < 0) {
172 /* NOTE: diagnostic already emitted; that's all we should 183 /* NOTE: diagnostic already emitted; that's all we should
173 * do unless gpio_*_value_cansleep() calls become different 184 * do unless gpio_*_value_cansleep() calls become different
@@ -187,6 +198,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
187 198
188 chip = container_of(gc, struct pca953x_chip, gpio_chip); 199 chip = container_of(gc, struct pca953x_chip, gpio_chip);
189 200
201 mutex_lock(&chip->i2c_lock);
190 if (val) 202 if (val)
191 reg_val = chip->reg_output | (1u << off); 203 reg_val = chip->reg_output | (1u << off);
192 else 204 else
@@ -194,9 +206,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
194 206
195 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); 207 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
196 if (ret) 208 if (ret)
197 return; 209 goto exit;
198 210
199 chip->reg_output = reg_val; 211 chip->reg_output = reg_val;
212exit:
213 mutex_unlock(&chip->i2c_lock);
200} 214}
201 215
202static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) 216static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
@@ -517,6 +531,8 @@ static int __devinit pca953x_probe(struct i2c_client *client,
517 531
518 chip->names = pdata->names; 532 chip->names = pdata->names;
519 533
534 mutex_init(&chip->i2c_lock);
535
520 /* initialize cached registers from their original values. 536 /* initialize cached registers from their original values.
521 * we can't share this chip with another i2c master. 537 * we can't share this chip with another i2c master.
522 */ 538 */
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index 349131eb1ce0..58c8f30352dd 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -193,13 +193,13 @@ out:
193 return ret; 193 return ret;
194} 194}
195 195
196static void timbgpio_irq(struct irq_data *d, struct irq_desc *desc) 196static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
197{ 197{
198 struct timbgpio *tgpio = irq_data_get_irq_data(d); 198 struct timbgpio *tgpio = get_irq_data(irq);
199 unsigned long ipr; 199 unsigned long ipr;
200 int offset; 200 int offset;
201 201
202 desc->irq_data.chip->ack(irq_get_irq_data(d)); 202 desc->irq_data.chip->irq_ack(irq_get_irq_data(irq));
203 ipr = ioread32(tgpio->membase + TGPIO_IPR); 203 ipr = ioread32(tgpio->membase + TGPIO_IPR);
204 iowrite32(ipr, tgpio->membase + TGPIO_ICR); 204 iowrite32(ipr, tgpio->membase + TGPIO_ICR);
205 205
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7af443672626..0902d4460039 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,7 +23,7 @@ config DRM_KMS_HELPER
23 tristate 23 tristate
24 depends on DRM 24 depends on DRM
25 select FB 25 select FB
26 select FRAMEBUFFER_CONSOLE if !EMBEDDED 26 select FRAMEBUFFER_CONSOLE if !EXPERT
27 help 27 help
28 FB and CRTC helpers for KMS drivers. 28 FB and CRTC helpers for KMS drivers.
29 29
@@ -100,14 +100,16 @@ config DRM_I830
100config DRM_I915 100config DRM_I915
101 tristate "i915 driver" 101 tristate "i915 driver"
102 depends on AGP_INTEL 102 depends on AGP_INTEL
103 # we need shmfs for the swappable backing store, and in particular
104 # the shmem_readpage() which depends upon tmpfs
103 select SHMEM 105 select SHMEM
106 select TMPFS
104 select DRM_KMS_HELPER 107 select DRM_KMS_HELPER
105 select FB_CFB_FILLRECT 108 select FB_CFB_FILLRECT
106 select FB_CFB_COPYAREA 109 select FB_CFB_COPYAREA
107 select FB_CFB_IMAGEBLIT 110 select FB_CFB_IMAGEBLIT
108 # i915 depends on ACPI_VIDEO when ACPI is enabled 111 # i915 depends on ACPI_VIDEO when ACPI is enabled
109 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 112 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
110 select VIDEO_OUTPUT_CONTROL if ACPI
111 select BACKLIGHT_CLASS_DEVICE if ACPI 113 select BACKLIGHT_CLASS_DEVICE if ACPI
112 select INPUT if ACPI 114 select INPUT if ACPI
113 select ACPI_VIDEO if ACPI 115 select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2baa6708e44c..654faa803dcb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2674,3 +2674,23 @@ out:
2674 mutex_unlock(&dev->mode_config.mutex); 2674 mutex_unlock(&dev->mode_config.mutex);
2675 return ret; 2675 return ret;
2676} 2676}
2677
2678void drm_mode_config_reset(struct drm_device *dev)
2679{
2680 struct drm_crtc *crtc;
2681 struct drm_encoder *encoder;
2682 struct drm_connector *connector;
2683
2684 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2685 if (crtc->funcs->reset)
2686 crtc->funcs->reset(crtc);
2687
2688 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
2689 if (encoder->funcs->reset)
2690 encoder->funcs->reset(encoder);
2691
2692 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
2693 if (connector->funcs->reset)
2694 connector->funcs->reset(connector);
2695}
2696EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 952b3d4fb2a6..92369655dca3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -343,13 +343,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
343 struct drm_encoder *encoder; 343 struct drm_encoder *encoder;
344 bool ret = true; 344 bool ret = true;
345 345
346 adjusted_mode = drm_mode_duplicate(dev, mode);
347
348 crtc->enabled = drm_helper_crtc_in_use(crtc); 346 crtc->enabled = drm_helper_crtc_in_use(crtc);
349
350 if (!crtc->enabled) 347 if (!crtc->enabled)
351 return true; 348 return true;
352 349
350 adjusted_mode = drm_mode_duplicate(dev, mode);
351
353 saved_hwmode = crtc->hwmode; 352 saved_hwmode = crtc->hwmode;
354 saved_mode = crtc->mode; 353 saved_mode = crtc->mode;
355 saved_x = crtc->x; 354 saved_x = crtc->x;
@@ -437,10 +436,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
437 */ 436 */
438 drm_calc_timestamping_constants(crtc); 437 drm_calc_timestamping_constants(crtc);
439 438
440 /* XXX free adjustedmode */
441 drm_mode_destroy(dev, adjusted_mode);
442 /* FIXME: add subpixel order */ 439 /* FIXME: add subpixel order */
443done: 440done:
441 drm_mode_destroy(dev, adjusted_mode);
444 if (!ret) { 442 if (!ret) {
445 crtc->hwmode = saved_hwmode; 443 crtc->hwmode = saved_hwmode;
446 crtc->mode = saved_mode; 444 crtc->mode = saved_mode;
@@ -497,14 +495,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
497 495
498 crtc_funcs = set->crtc->helper_private; 496 crtc_funcs = set->crtc->helper_private;
499 497
498 if (!set->mode)
499 set->fb = NULL;
500
500 if (set->fb) { 501 if (set->fb) {
501 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 502 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
502 set->crtc->base.id, set->fb->base.id, 503 set->crtc->base.id, set->fb->base.id,
503 (int)set->num_connectors, set->x, set->y); 504 (int)set->num_connectors, set->x, set->y);
504 } else { 505 } else {
505 DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n", 506 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
506 set->crtc->base.id, (int)set->num_connectors, 507 set->mode = NULL;
507 set->x, set->y); 508 set->num_connectors = 0;
508 } 509 }
509 510
510 dev = set->crtc->dev; 511 dev = set->crtc->dev;
@@ -649,8 +650,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
649 mode_changed = true; 650 mode_changed = true;
650 651
651 if (mode_changed) { 652 if (mode_changed) {
652 set->crtc->enabled = (set->mode != NULL); 653 set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
653 if (set->mode != NULL) { 654 if (set->crtc->enabled) {
654 DRM_DEBUG_KMS("attempting to set mode from" 655 DRM_DEBUG_KMS("attempting to set mode from"
655 " userspace\n"); 656 " userspace\n");
656 drm_mode_debug_printmodeline(set->mode); 657 drm_mode_debug_printmodeline(set->mode);
@@ -665,6 +666,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
665 ret = -EINVAL; 666 ret = -EINVAL;
666 goto fail; 667 goto fail;
667 } 668 }
669 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
670 for (i = 0; i < set->num_connectors; i++) {
671 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
672 drm_get_connector_name(set->connectors[i]));
673 set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
674 }
668 } 675 }
669 drm_helper_disable_unused_functions(dev); 676 drm_helper_disable_unused_functions(dev);
670 } else if (fb_changed) { 677 } else if (fb_changed) {
@@ -681,12 +688,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
681 goto fail; 688 goto fail;
682 } 689 }
683 } 690 }
684 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
685 for (i = 0; i < set->num_connectors; i++) {
686 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
687 drm_get_connector_name(set->connectors[i]));
688 set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
689 }
690 691
691 kfree(save_connectors); 692 kfree(save_connectors);
692 kfree(save_encoders); 693 kfree(save_encoders);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0307d601f5e5..6977a1ce9d98 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,25 +607,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
607} 607}
608EXPORT_SYMBOL(drm_fb_helper_fini); 608EXPORT_SYMBOL(drm_fb_helper_fini);
609 609
610void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
611{
612 info->fix.type = FB_TYPE_PACKED_PIXELS;
613 info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
614 FB_VISUAL_TRUECOLOR;
615 info->fix.mmio_start = 0;
616 info->fix.mmio_len = 0;
617 info->fix.type_aux = 0;
618 info->fix.xpanstep = 1; /* doing it in hw */
619 info->fix.ypanstep = 1; /* doing it in hw */
620 info->fix.ywrapstep = 0;
621 info->fix.accel = FB_ACCEL_NONE;
622 info->fix.type_aux = 0;
623
624 info->fix.line_length = fb->pitch;
625 return;
626}
627EXPORT_SYMBOL(drm_fb_helper_fill_fix);
628
629static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, 610static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
630 u16 blue, u16 regno, struct fb_info *info) 611 u16 blue, u16 regno, struct fb_info *info)
631{ 612{
@@ -835,7 +816,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
835 mutex_unlock(&dev->mode_config.mutex); 816 mutex_unlock(&dev->mode_config.mutex);
836 return ret; 817 return ret;
837 } 818 }
838 drm_fb_helper_fill_fix(info, fb_helper->fb);
839 } 819 }
840 mutex_unlock(&dev->mode_config.mutex); 820 mutex_unlock(&dev->mode_config.mutex);
841 821
@@ -973,7 +953,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
973 953
974 if (new_fb) { 954 if (new_fb) {
975 info->var.pixclock = 0; 955 info->var.pixclock = 0;
976 drm_fb_helper_fill_fix(info, fb_helper->fb);
977 if (register_framebuffer(info) < 0) { 956 if (register_framebuffer(info) < 0) {
978 return -EINVAL; 957 return -EINVAL;
979 } 958 }
@@ -1000,6 +979,26 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1000} 979}
1001EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); 980EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
1002 981
982void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
983 uint32_t depth)
984{
985 info->fix.type = FB_TYPE_PACKED_PIXELS;
986 info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
987 FB_VISUAL_TRUECOLOR;
988 info->fix.mmio_start = 0;
989 info->fix.mmio_len = 0;
990 info->fix.type_aux = 0;
991 info->fix.xpanstep = 1; /* doing it in hw */
992 info->fix.ypanstep = 1; /* doing it in hw */
993 info->fix.ywrapstep = 0;
994 info->fix.accel = FB_ACCEL_NONE;
995 info->fix.type_aux = 0;
996
997 info->fix.line_length = pitch;
998 return;
999}
1000EXPORT_SYMBOL(drm_fb_helper_fill_fix);
1001
1003void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 1002void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
1004 uint32_t fb_width, uint32_t fb_height) 1003 uint32_t fb_width, uint32_t fb_height)
1005{ 1004{
@@ -1534,11 +1533,11 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1534} 1533}
1535EXPORT_SYMBOL(drm_fb_helper_hotplug_event); 1534EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
1536 1535
1537/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED) 1536/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
1538 * but the module doesn't depend on any fb console symbols. At least 1537 * but the module doesn't depend on any fb console symbols. At least
1539 * attempt to load fbcon to avoid leaving the system without a usable console. 1538 * attempt to load fbcon to avoid leaving the system without a usable console.
1540 */ 1539 */
1541#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED) 1540#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
1542static int __init drm_fb_helper_modinit(void) 1541static int __init drm_fb_helper_modinit(void)
1543{ 1542{
1544 const char *name = "fbcon"; 1543 const char *name = "fbcon";
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0054e957203f..3dadfa2a8528 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1250,7 +1250,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1250 * Drivers should call this routine in their vblank interrupt handlers to 1250 * Drivers should call this routine in their vblank interrupt handlers to
1251 * update the vblank counter and send any signals that may be pending. 1251 * update the vblank counter and send any signals that may be pending.
1252 */ 1252 */
1253void drm_handle_vblank(struct drm_device *dev, int crtc) 1253bool drm_handle_vblank(struct drm_device *dev, int crtc)
1254{ 1254{
1255 u32 vblcount; 1255 u32 vblcount;
1256 s64 diff_ns; 1256 s64 diff_ns;
@@ -1258,7 +1258,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
1258 unsigned long irqflags; 1258 unsigned long irqflags;
1259 1259
1260 if (!dev->num_crtcs) 1260 if (!dev->num_crtcs)
1261 return; 1261 return false;
1262 1262
1263 /* Need timestamp lock to prevent concurrent execution with 1263 /* Need timestamp lock to prevent concurrent execution with
1264 * vblank enable/disable, as this would cause inconsistent 1264 * vblank enable/disable, as this would cause inconsistent
@@ -1269,7 +1269,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
1269 /* Vblank irq handling disabled. Nothing to do. */ 1269 /* Vblank irq handling disabled. Nothing to do. */
1270 if (!dev->vblank_enabled[crtc]) { 1270 if (!dev->vblank_enabled[crtc]) {
1271 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1271 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1272 return; 1272 return false;
1273 } 1273 }
1274 1274
1275 /* Fetch corresponding timestamp for this vblank interval from 1275 /* Fetch corresponding timestamp for this vblank interval from
@@ -1311,5 +1311,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
1311 drm_handle_vblank_events(dev, crtc); 1311 drm_handle_vblank_events(dev, crtc);
1312 1312
1313 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1313 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1314 return true;
1314} 1315}
1315EXPORT_SYMBOL(drm_handle_vblank); 1316EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 19a3d58044dd..3601466c5502 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -703,7 +703,7 @@ static void print_error_buffers(struct seq_file *m,
703 seq_printf(m, "%s [%d]:\n", name, count); 703 seq_printf(m, "%s [%d]:\n", name, count);
704 704
705 while (count--) { 705 while (count--) {
706 seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s%s", 706 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
707 err->gtt_offset, 707 err->gtt_offset,
708 err->size, 708 err->size,
709 err->read_domains, 709 err->read_domains,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 844f3c972b04..17bd766f2081 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
152{ 152{
153 drm_i915_private_t *dev_priv = dev->dev_private; 153 drm_i915_private_t *dev_priv = dev->dev_private;
154 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 154 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
155 struct intel_ring_buffer *ring = LP_RING(dev_priv); 155 int ret;
156 156
157 master_priv->sarea = drm_getsarea(dev); 157 master_priv->sarea = drm_getsarea(dev);
158 if (master_priv->sarea) { 158 if (master_priv->sarea) {
@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
163 } 163 }
164 164
165 if (init->ring_size != 0) { 165 if (init->ring_size != 0) {
166 if (ring->obj != NULL) { 166 if (LP_RING(dev_priv)->obj != NULL) {
167 i915_dma_cleanup(dev); 167 i915_dma_cleanup(dev);
168 DRM_ERROR("Client tried to initialize ringbuffer in " 168 DRM_ERROR("Client tried to initialize ringbuffer in "
169 "GEM mode\n"); 169 "GEM mode\n");
170 return -EINVAL; 170 return -EINVAL;
171 } 171 }
172 172
173 ring->size = init->ring_size; 173 ret = intel_render_ring_init_dri(dev,
174 174 init->ring_start,
175 ring->map.offset = init->ring_start; 175 init->ring_size);
176 ring->map.size = init->ring_size; 176 if (ret) {
177 ring->map.type = 0;
178 ring->map.flags = 0;
179 ring->map.mtrr = 0;
180
181 drm_core_ioremap_wc(&ring->map, dev);
182
183 if (ring->map.handle == NULL) {
184 i915_dma_cleanup(dev); 177 i915_dma_cleanup(dev);
185 DRM_ERROR("can not ioremap virtual address for" 178 return ret;
186 " ring buffer\n");
187 return -ENOMEM;
188 } 179 }
189 } 180 }
190 181
191 ring->virtual_start = ring->map.handle;
192
193 dev_priv->cpp = init->cpp; 182 dev_priv->cpp = init->cpp;
194 dev_priv->back_offset = init->back_offset; 183 dev_priv->back_offset = init->back_offset;
195 dev_priv->front_offset = init->front_offset; 184 dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
1226 if (ret) 1215 if (ret)
1227 DRM_INFO("failed to find VBIOS tables\n"); 1216 DRM_INFO("failed to find VBIOS tables\n");
1228 1217
1229 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 1218 /* If we have > 1 VGA cards, then we need to arbitrate access
1219 * to the common VGA resources.
1220 *
1221 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
1222 * then we do not take part in VGA arbitration and the
1223 * vga_client_register() fails with -ENODEV.
1224 */
1230 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 1225 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1231 if (ret) 1226 if (ret && ret != -ENODEV)
1232 goto cleanup_ringbuffer; 1227 goto cleanup_ringbuffer;
1233 1228
1234 intel_register_dsm_handler(); 1229 intel_register_dsm_handler();
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0de75a23f8e7..cfb56d0ff367 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,9 @@ module_param_named(powersave, i915_powersave, int, 0600);
49unsigned int i915_lvds_downclock = 0; 49unsigned int i915_lvds_downclock = 0;
50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
51 51
52unsigned int i915_panel_use_ssc = 1;
53module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
54
52bool i915_try_reset = true; 55bool i915_try_reset = true;
53module_param_named(reset, i915_try_reset, bool, 0600); 56module_param_named(reset, i915_try_reset, bool, 0600);
54 57
@@ -57,7 +60,7 @@ extern int intel_agp_enabled;
57 60
58#define INTEL_VGA_DEVICE(id, info) { \ 61#define INTEL_VGA_DEVICE(id, info) { \
59 .class = PCI_CLASS_DISPLAY_VGA << 8, \ 62 .class = PCI_CLASS_DISPLAY_VGA << 8, \
60 .class_mask = 0xffff00, \ 63 .class_mask = 0xff0000, \
61 .vendor = 0x8086, \ 64 .vendor = 0x8086, \
62 .device = id, \ 65 .device = id, \
63 .subvendor = PCI_ANY_ID, \ 66 .subvendor = PCI_ANY_ID, \
@@ -351,6 +354,7 @@ static int i915_drm_thaw(struct drm_device *dev)
351 error = i915_gem_init_ringbuffer(dev); 354 error = i915_gem_init_ringbuffer(dev);
352 mutex_unlock(&dev->struct_mutex); 355 mutex_unlock(&dev->struct_mutex);
353 356
357 drm_mode_config_reset(dev);
354 drm_irq_install(dev); 358 drm_irq_install(dev);
355 359
356 /* Resume the modeset for every activated CRTC */ 360 /* Resume the modeset for every activated CRTC */
@@ -539,6 +543,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
539 543
540 mutex_unlock(&dev->struct_mutex); 544 mutex_unlock(&dev->struct_mutex);
541 drm_irq_uninstall(dev); 545 drm_irq_uninstall(dev);
546 drm_mode_config_reset(dev);
542 drm_irq_install(dev); 547 drm_irq_install(dev);
543 mutex_lock(&dev->struct_mutex); 548 mutex_lock(&dev->struct_mutex);
544 } 549 }
@@ -563,6 +568,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
563static int __devinit 568static int __devinit
564i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 569i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
565{ 570{
571 /* Only bind to function 0 of the device. Early generations
572 * used function 1 as a placeholder for multi-head. This causes
573 * us confusion instead, especially on the systems where both
574 * functions have the same PCI-ID!
575 */
576 if (PCI_FUNC(pdev->devfn))
577 return -ENODEV;
578
566 return drm_get_pci_dev(pdev, ent, &driver); 579 return drm_get_pci_dev(pdev, ent, &driver);
567} 580}
568 581
@@ -749,6 +762,9 @@ static int __init i915_init(void)
749 driver.driver_features &= ~DRIVER_MODESET; 762 driver.driver_features &= ~DRIVER_MODESET;
750#endif 763#endif
751 764
765 if (!(driver.driver_features & DRIVER_MODESET))
766 driver.get_vblank_timestamp = NULL;
767
752 return drm_init(&driver); 768 return drm_init(&driver);
753} 769}
754 770
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 385fc7ec39d3..a0149c619cdd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -543,8 +543,11 @@ typedef struct drm_i915_private {
543 /** List of all objects in gtt_space. Used to restore gtt 543 /** List of all objects in gtt_space. Used to restore gtt
544 * mappings on resume */ 544 * mappings on resume */
545 struct list_head gtt_list; 545 struct list_head gtt_list;
546 /** End of mappable part of GTT */ 546
547 /** Usable portion of the GTT for GEM */
548 unsigned long gtt_start;
547 unsigned long gtt_mappable_end; 549 unsigned long gtt_mappable_end;
550 unsigned long gtt_end;
548 551
549 struct io_mapping *gtt_mapping; 552 struct io_mapping *gtt_mapping;
550 int gtt_mtrr; 553 int gtt_mtrr;
@@ -954,6 +957,7 @@ extern int i915_max_ioctl;
954extern unsigned int i915_fbpercrtc; 957extern unsigned int i915_fbpercrtc;
955extern unsigned int i915_powersave; 958extern unsigned int i915_powersave;
956extern unsigned int i915_lvds_downclock; 959extern unsigned int i915_lvds_downclock;
960extern unsigned int i915_panel_use_ssc;
957 961
958extern int i915_suspend(struct drm_device *dev, pm_message_t state); 962extern int i915_suspend(struct drm_device *dev, pm_message_t state);
959extern int i915_resume(struct drm_device *dev); 963extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848ff755..cf4f74c7c6fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev,
140{ 140{
141 drm_i915_private_t *dev_priv = dev->dev_private; 141 drm_i915_private_t *dev_priv = dev->dev_private;
142 142
143 drm_mm_init(&dev_priv->mm.gtt_space, start, 143 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
144 end - start);
145 144
145 dev_priv->mm.gtt_start = start;
146 dev_priv->mm.gtt_mappable_end = mappable_end;
147 dev_priv->mm.gtt_end = end;
146 dev_priv->mm.gtt_total = end - start; 148 dev_priv->mm.gtt_total = end - start;
147 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; 149 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
148 dev_priv->mm.gtt_mappable_end = mappable_end; 150
151 /* Take over this portion of the GTT */
152 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
149} 153}
150 154
151int 155int
@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1857 1861
1858 seqno = ring->get_seqno(ring); 1862 seqno = ring->get_seqno(ring);
1859 1863
1860 for (i = 0; i < I915_NUM_RINGS; i++) 1864 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1861 if (seqno >= ring->sync_seqno[i]) 1865 if (seqno >= ring->sync_seqno[i])
1862 ring->sync_seqno[i] = 0; 1866 ring->sync_seqno[i] = 0;
1863 1867
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e69834341ef0..d2f445e825f2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -464,8 +464,6 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
464 int ret; 464 int ret;
465 465
466 list_for_each_entry(obj, objects, exec_list) { 466 list_for_each_entry(obj, objects, exec_list) {
467 obj->base.pending_read_domains = 0;
468 obj->base.pending_write_domain = 0;
469 ret = i915_gem_execbuffer_relocate_object(obj, eb); 467 ret = i915_gem_execbuffer_relocate_object(obj, eb);
470 if (ret) 468 if (ret)
471 return ret; 469 return ret;
@@ -505,6 +503,9 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
505 list_move(&obj->exec_list, &ordered_objects); 503 list_move(&obj->exec_list, &ordered_objects);
506 else 504 else
507 list_move_tail(&obj->exec_list, &ordered_objects); 505 list_move_tail(&obj->exec_list, &ordered_objects);
506
507 obj->base.pending_read_domains = 0;
508 obj->base.pending_write_domain = 0;
508 } 509 }
509 list_splice(&ordered_objects, objects); 510 list_splice(&ordered_objects, objects);
510 511
@@ -636,6 +637,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
636{ 637{
637 struct drm_i915_gem_relocation_entry *reloc; 638 struct drm_i915_gem_relocation_entry *reloc;
638 struct drm_i915_gem_object *obj; 639 struct drm_i915_gem_object *obj;
640 int *reloc_offset;
639 int i, total, ret; 641 int i, total, ret;
640 642
641 /* We may process another execbuffer during the unlock... */ 643 /* We may process another execbuffer during the unlock... */
@@ -653,8 +655,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
653 for (i = 0; i < count; i++) 655 for (i = 0; i < count; i++)
654 total += exec[i].relocation_count; 656 total += exec[i].relocation_count;
655 657
658 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
656 reloc = drm_malloc_ab(total, sizeof(*reloc)); 659 reloc = drm_malloc_ab(total, sizeof(*reloc));
657 if (reloc == NULL) { 660 if (reloc == NULL || reloc_offset == NULL) {
661 drm_free_large(reloc);
662 drm_free_large(reloc_offset);
658 mutex_lock(&dev->struct_mutex); 663 mutex_lock(&dev->struct_mutex);
659 return -ENOMEM; 664 return -ENOMEM;
660 } 665 }
@@ -672,6 +677,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
672 goto err; 677 goto err;
673 } 678 }
674 679
680 reloc_offset[i] = total;
675 total += exec[i].relocation_count; 681 total += exec[i].relocation_count;
676 } 682 }
677 683
@@ -705,17 +711,12 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
705 if (ret) 711 if (ret)
706 goto err; 712 goto err;
707 713
708 total = 0;
709 list_for_each_entry(obj, objects, exec_list) { 714 list_for_each_entry(obj, objects, exec_list) {
710 obj->base.pending_read_domains = 0; 715 int offset = obj->exec_entry - exec;
711 obj->base.pending_write_domain = 0;
712 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 716 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
713 reloc + total); 717 reloc + reloc_offset[offset]);
714 if (ret) 718 if (ret)
715 goto err; 719 goto err;
716
717 total += exec->relocation_count;
718 exec++;
719 } 720 }
720 721
721 /* Leave the user relocations as are, this is the painfully slow path, 722 /* Leave the user relocations as are, this is the painfully slow path,
@@ -726,6 +727,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
726 727
727err: 728err:
728 drm_free_large(reloc); 729 drm_free_large(reloc);
730 drm_free_large(reloc_offset);
729 return ret; 731 return ret;
730} 732}
731 733
@@ -770,7 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
770 if (from == NULL || to == from) 772 if (from == NULL || to == from)
771 return 0; 773 return 0;
772 774
773 if (INTEL_INFO(obj->base.dev)->gen < 6) 775 /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
776 if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
774 return i915_gem_object_wait_rendering(obj, true); 777 return i915_gem_object_wait_rendering(obj, true);
775 778
776 idx = intel_ring_sync_index(from, to); 779 idx = intel_ring_sync_index(from, to);
@@ -1172,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1172 goto err; 1175 goto err;
1173 1176
1174 seqno = i915_gem_next_request_seqno(dev, ring); 1177 seqno = i915_gem_next_request_seqno(dev, ring);
1175 for (i = 0; i < I915_NUM_RINGS-1; i++) { 1178 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
1176 if (seqno < ring->sync_seqno[i]) { 1179 if (seqno < ring->sync_seqno[i]) {
1177 /* The GPU can not handle its semaphore value wrapping, 1180 /* The GPU can not handle its semaphore value wrapping,
1178 * so every billion or so execbuffers, we need to stall 1181 * so every billion or so execbuffers, we need to stall
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 70433ae50ac8..b0abdc64aa9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
34 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
35 struct drm_i915_gem_object *obj; 35 struct drm_i915_gem_object *obj;
36 36
37 /* First fill our portion of the GTT with scratch pages */
38 intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
39 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
40
37 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 41 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
38 i915_gem_clflush_object(obj); 42 i915_gem_clflush_object(obj);
39 43
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e418e8bb61e6..97f946dcc1aa 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
274 return ret; 274 return ret;
275} 275}
276 276
277int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, 277int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
278 int *max_error, 278 int *max_error,
279 struct timeval *vblank_time, 279 struct timeval *vblank_time,
280 unsigned flags) 280 unsigned flags)
281{ 281{
282 struct drm_crtc *drmcrtc; 282 struct drm_i915_private *dev_priv = dev->dev_private;
283 struct drm_crtc *crtc;
283 284
284 if (crtc < 0 || crtc >= dev->num_crtcs) { 285 if (pipe < 0 || pipe >= dev_priv->num_pipe) {
285 DRM_ERROR("Invalid crtc %d\n", crtc); 286 DRM_ERROR("Invalid crtc %d\n", pipe);
286 return -EINVAL; 287 return -EINVAL;
287 } 288 }
288 289
289 /* Get drm_crtc to timestamp: */ 290 /* Get drm_crtc to timestamp: */
290 drmcrtc = intel_get_crtc_for_pipe(dev, crtc); 291 crtc = intel_get_crtc_for_pipe(dev, pipe);
292 if (crtc == NULL) {
293 DRM_ERROR("Invalid crtc %d\n", pipe);
294 return -EINVAL;
295 }
296
297 if (!crtc->enabled) {
298 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
299 return -EBUSY;
300 }
291 301
292 /* Helper routine in DRM core does all the work: */ 302 /* Helper routine in DRM core does all the work: */
293 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 303 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
294 vblank_time, flags, drmcrtc); 304 vblank_time, flags,
305 crtc);
295} 306}
296 307
297/* 308/*
@@ -348,8 +359,12 @@ static void notify_ring(struct drm_device *dev,
348 struct intel_ring_buffer *ring) 359 struct intel_ring_buffer *ring)
349{ 360{
350 struct drm_i915_private *dev_priv = dev->dev_private; 361 struct drm_i915_private *dev_priv = dev->dev_private;
351 u32 seqno = ring->get_seqno(ring); 362 u32 seqno;
352 363
364 if (ring->obj == NULL)
365 return;
366
367 seqno = ring->get_seqno(ring);
353 trace_i915_gem_request_complete(dev, seqno); 368 trace_i915_gem_request_complete(dev, seqno);
354 369
355 ring->irq_seqno = seqno; 370 ring->irq_seqno = seqno;
@@ -720,7 +735,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
720 if (obj->ring != ring) 735 if (obj->ring != ring)
721 continue; 736 continue;
722 737
723 if (!i915_seqno_passed(obj->last_rendering_seqno, seqno)) 738 if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
724 continue; 739 continue;
725 740
726 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 741 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -831,6 +846,8 @@ static void i915_capture_error_state(struct drm_device *dev)
831 i++; 846 i++;
832 error->pinned_bo_count = i - error->active_bo_count; 847 error->pinned_bo_count = i - error->active_bo_count;
833 848
849 error->active_bo = NULL;
850 error->pinned_bo = NULL;
834 if (i) { 851 if (i) {
835 error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 852 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
836 GFP_ATOMIC); 853 GFP_ATOMIC);
@@ -1179,18 +1196,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1179 intel_finish_page_flip_plane(dev, 1); 1196 intel_finish_page_flip_plane(dev, 1);
1180 } 1197 }
1181 1198
1182 if (pipea_stats & vblank_status) { 1199 if (pipea_stats & vblank_status &&
1200 drm_handle_vblank(dev, 0)) {
1183 vblank++; 1201 vblank++;
1184 drm_handle_vblank(dev, 0);
1185 if (!dev_priv->flip_pending_is_done) { 1202 if (!dev_priv->flip_pending_is_done) {
1186 i915_pageflip_stall_check(dev, 0); 1203 i915_pageflip_stall_check(dev, 0);
1187 intel_finish_page_flip(dev, 0); 1204 intel_finish_page_flip(dev, 0);
1188 } 1205 }
1189 } 1206 }
1190 1207
1191 if (pipeb_stats & vblank_status) { 1208 if (pipeb_stats & vblank_status &&
1209 drm_handle_vblank(dev, 1)) {
1192 vblank++; 1210 vblank++;
1193 drm_handle_vblank(dev, 1);
1194 if (!dev_priv->flip_pending_is_done) { 1211 if (!dev_priv->flip_pending_is_done) {
1195 i915_pageflip_stall_check(dev, 1); 1212 i915_pageflip_stall_check(dev, 1);
1196 intel_finish_page_flip(dev, 1); 1213 intel_finish_page_flip(dev, 1);
@@ -1278,12 +1295,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1278 if (master_priv->sarea_priv) 1295 if (master_priv->sarea_priv)
1279 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1296 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1280 1297
1281 ret = -ENODEV;
1282 if (ring->irq_get(ring)) { 1298 if (ring->irq_get(ring)) {
1283 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, 1299 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
1284 READ_BREADCRUMB(dev_priv) >= irq_nr); 1300 READ_BREADCRUMB(dev_priv) >= irq_nr);
1285 ring->irq_put(ring); 1301 ring->irq_put(ring);
1286 } 1302 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
1303 ret = -EBUSY;
1287 1304
1288 if (ret == -EBUSY) { 1305 if (ret == -EBUSY) {
1289 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 1306 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 40a407f41f61..5cfc68940f17 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -513,6 +513,10 @@
513#define GEN6_BLITTER_SYNC_STATUS (1 << 24) 513#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
514#define GEN6_BLITTER_USER_INTERRUPT (1 << 22) 514#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
515 515
516#define GEN6_BLITTER_ECOSKPD 0x221d0
517#define GEN6_BLITTER_LOCK_SHIFT 16
518#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
519
516#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 520#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
517#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) 521#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
518#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) 522#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
@@ -2626,6 +2630,8 @@
2626#define DISPLAY_PORT_PLL_BIOS_2 0x46014 2630#define DISPLAY_PORT_PLL_BIOS_2 0x46014
2627 2631
2628#define PCH_DSPCLK_GATE_D 0x42020 2632#define PCH_DSPCLK_GATE_D 0x42020
2633# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
2634# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
2629# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7) 2635# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
2630# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5) 2636# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
2631 2637
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b0b1200ed650..0b44956c336b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -264,17 +264,12 @@ parse_general_features(struct drm_i915_private *dev_priv,
264 dev_priv->int_crt_support = general->int_crt_support; 264 dev_priv->int_crt_support = general->int_crt_support;
265 dev_priv->lvds_use_ssc = general->enable_ssc; 265 dev_priv->lvds_use_ssc = general->enable_ssc;
266 266
267 if (dev_priv->lvds_use_ssc) { 267 if (IS_I85X(dev))
268 if (IS_I85X(dev)) 268 dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
269 dev_priv->lvds_ssc_freq = 269 else if (IS_GEN5(dev) || IS_GEN6(dev))
270 general->ssc_freq ? 66 : 48; 270 dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
271 else if (IS_GEN5(dev) || IS_GEN6(dev)) 271 else
272 dev_priv->lvds_ssc_freq = 272 dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
273 general->ssc_freq ? 100 : 120;
274 else
275 dev_priv->lvds_ssc_freq =
276 general->ssc_freq ? 100 : 96;
277 }
278 } 273 }
279} 274}
280 275
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 17035b87ee46..8a77ff4a7237 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector,
535 return 0; 535 return 0;
536} 536}
537 537
538static void intel_crt_reset(struct drm_connector *connector)
539{
540 struct drm_device *dev = connector->dev;
541 struct intel_crt *crt = intel_attached_crt(connector);
542
543 if (HAS_PCH_SPLIT(dev))
544 crt->force_hotplug_required = 1;
545}
546
538/* 547/*
539 * Routines for controlling stuff on the analog port 548 * Routines for controlling stuff on the analog port
540 */ 549 */
@@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
548}; 557};
549 558
550static const struct drm_connector_funcs intel_crt_connector_funcs = { 559static const struct drm_connector_funcs intel_crt_connector_funcs = {
560 .reset = intel_crt_reset,
551 .dpms = drm_helper_connector_dpms, 561 .dpms = drm_helper_connector_dpms,
552 .detect = intel_crt_detect, 562 .detect = intel_crt_detect,
553 .fill_modes = drm_helper_probe_single_connector_modes, 563 .fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 25d96889d7d2..7e42aa586504 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
1213 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 1213 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1214} 1214}
1215 1215
1216static void sandybridge_blit_fbc_update(struct drm_device *dev)
1217{
1218 struct drm_i915_private *dev_priv = dev->dev_private;
1219 u32 blt_ecoskpd;
1220
1221 /* Make sure blitter notifies FBC of writes */
1222 __gen6_force_wake_get(dev_priv);
1223 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1224 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1225 GEN6_BLITTER_LOCK_SHIFT;
1226 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1227 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1228 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1229 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1230 GEN6_BLITTER_LOCK_SHIFT);
1231 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1232 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1233 __gen6_force_wake_put(dev_priv);
1234}
1235
1216static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1236static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1217{ 1237{
1218 struct drm_device *dev = crtc->dev; 1238 struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1266 I915_WRITE(SNB_DPFC_CTL_SA, 1286 I915_WRITE(SNB_DPFC_CTL_SA,
1267 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); 1287 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
1268 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); 1288 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1289 sandybridge_blit_fbc_update(dev);
1269 } 1290 }
1270 1291
1271 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1292 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -3822,6 +3843,11 @@ static void intel_update_watermarks(struct drm_device *dev)
3822 sr_hdisplay, sr_htotal, pixel_size); 3843 sr_hdisplay, sr_htotal, pixel_size);
3823} 3844}
3824 3845
3846static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3847{
3848 return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
3849}
3850
3825static int intel_crtc_mode_set(struct drm_crtc *crtc, 3851static int intel_crtc_mode_set(struct drm_crtc *crtc,
3826 struct drm_display_mode *mode, 3852 struct drm_display_mode *mode,
3827 struct drm_display_mode *adjusted_mode, 3853 struct drm_display_mode *adjusted_mode,
@@ -3884,7 +3910,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3884 num_connectors++; 3910 num_connectors++;
3885 } 3911 }
3886 3912
3887 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { 3913 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
3888 refclk = dev_priv->lvds_ssc_freq * 1000; 3914 refclk = dev_priv->lvds_ssc_freq * 1000;
3889 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 3915 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3890 refclk / 1000); 3916 refclk / 1000);
@@ -4059,7 +4085,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4059 udelay(200); 4085 udelay(200);
4060 4086
4061 if (has_edp_encoder) { 4087 if (has_edp_encoder) {
4062 if (dev_priv->lvds_use_ssc) { 4088 if (intel_panel_use_ssc(dev_priv)) {
4063 temp |= DREF_SSC1_ENABLE; 4089 temp |= DREF_SSC1_ENABLE;
4064 I915_WRITE(PCH_DREF_CONTROL, temp); 4090 I915_WRITE(PCH_DREF_CONTROL, temp);
4065 4091
@@ -4070,13 +4096,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4070 4096
4071 /* Enable CPU source on CPU attached eDP */ 4097 /* Enable CPU source on CPU attached eDP */
4072 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4098 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4073 if (dev_priv->lvds_use_ssc) 4099 if (intel_panel_use_ssc(dev_priv))
4074 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 4100 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4075 else 4101 else
4076 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 4102 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4077 } else { 4103 } else {
4078 /* Enable SSC on PCH eDP if needed */ 4104 /* Enable SSC on PCH eDP if needed */
4079 if (dev_priv->lvds_use_ssc) { 4105 if (intel_panel_use_ssc(dev_priv)) {
4080 DRM_ERROR("enabling SSC on PCH\n"); 4106 DRM_ERROR("enabling SSC on PCH\n");
4081 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; 4107 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
4082 } 4108 }
@@ -4104,7 +4130,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4104 int factor = 21; 4130 int factor = 21;
4105 4131
4106 if (is_lvds) { 4132 if (is_lvds) {
4107 if ((dev_priv->lvds_use_ssc && 4133 if ((intel_panel_use_ssc(dev_priv) &&
4108 dev_priv->lvds_ssc_freq == 100) || 4134 dev_priv->lvds_ssc_freq == 100) ||
4109 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) 4135 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4110 factor = 25; 4136 factor = 25;
@@ -4183,7 +4209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4183 /* XXX: just matching BIOS for now */ 4209 /* XXX: just matching BIOS for now */
4184 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 4210 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4185 dpll |= 3; 4211 dpll |= 3;
4186 else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) 4212 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4187 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4213 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4188 else 4214 else
4189 dpll |= PLL_REF_INPUT_DREFCLK; 4215 dpll |= PLL_REF_INPUT_DREFCLK;
@@ -5525,6 +5551,18 @@ cleanup_work:
5525 return ret; 5551 return ret;
5526} 5552}
5527 5553
5554static void intel_crtc_reset(struct drm_crtc *crtc)
5555{
5556 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5557
5558 /* Reset flags back to the 'unknown' status so that they
5559 * will be correctly set on the initial modeset.
5560 */
5561 intel_crtc->cursor_addr = 0;
5562 intel_crtc->dpms_mode = -1;
5563 intel_crtc->active = true; /* force the pipe off on setup_init_config */
5564}
5565
5528static struct drm_crtc_helper_funcs intel_helper_funcs = { 5566static struct drm_crtc_helper_funcs intel_helper_funcs = {
5529 .dpms = intel_crtc_dpms, 5567 .dpms = intel_crtc_dpms,
5530 .mode_fixup = intel_crtc_mode_fixup, 5568 .mode_fixup = intel_crtc_mode_fixup,
@@ -5536,6 +5574,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
5536}; 5574};
5537 5575
5538static const struct drm_crtc_funcs intel_crtc_funcs = { 5576static const struct drm_crtc_funcs intel_crtc_funcs = {
5577 .reset = intel_crtc_reset,
5539 .cursor_set = intel_crtc_cursor_set, 5578 .cursor_set = intel_crtc_cursor_set,
5540 .cursor_move = intel_crtc_cursor_move, 5579 .cursor_move = intel_crtc_cursor_move,
5541 .gamma_set = intel_crtc_gamma_set, 5580 .gamma_set = intel_crtc_gamma_set,
@@ -5626,9 +5665,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5626 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 5665 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
5627 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 5666 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
5628 5667
5629 intel_crtc->cursor_addr = 0; 5668 intel_crtc_reset(&intel_crtc->base);
5630 intel_crtc->dpms_mode = -1;
5631 intel_crtc->active = true; /* force the pipe off on setup_init_config */
5632 5669
5633 if (HAS_PCH_SPLIT(dev)) { 5670 if (HAS_PCH_SPLIT(dev)) {
5634 intel_helper_funcs.prepare = ironlake_crtc_prepare; 5671 intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6281,7 +6318,9 @@ void intel_enable_clock_gating(struct drm_device *dev)
6281 6318
6282 if (IS_GEN5(dev)) { 6319 if (IS_GEN5(dev)) {
6283 /* Required for FBC */ 6320 /* Required for FBC */
6284 dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; 6321 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
6322 DPFCRUNIT_CLOCK_GATE_DISABLE |
6323 DPFDUNIT_CLOCK_GATE_DISABLE;
6285 /* Required for CxSR */ 6324 /* Required for CxSR */
6286 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; 6325 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
6287 6326
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ee145a257287..512782728e51 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,6 +148,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
148 148
149// memset(info->screen_base, 0, size); 149// memset(info->screen_base, 0, size);
150 150
151 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
151 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); 152 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
152 153
153 info->pixmap.size = 64*1024; 154 info->pixmap.size = 64*1024;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8f4f6bd33ee9..ace8d5d30dd2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -704,6 +704,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
704 }, 704 },
705 { 705 {
706 .callback = intel_no_lvds_dmi_callback, 706 .callback = intel_no_lvds_dmi_callback,
707 .ident = "AOpen i915GMm-HFS",
708 .matches = {
709 DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
710 DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
711 },
712 },
713 {
714 .callback = intel_no_lvds_dmi_callback,
707 .ident = "Aopen i945GTt-VFA", 715 .ident = "Aopen i945GTt-VFA",
708 .matches = { 716 .matches = {
709 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), 717 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f295a7aaadf9..64fd64443ca6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/acpi.h> 28#include <linux/acpi.h>
29#include <linux/acpi_io.h>
29#include <acpi/video.h> 30#include <acpi/video.h>
30 31
31#include "drmP.h" 32#include "drmP.h"
@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
476 return -ENOTSUPP; 477 return -ENOTSUPP;
477 } 478 }
478 479
479 base = ioremap(asls, OPREGION_SIZE); 480 base = acpi_os_ioremap(asls, OPREGION_SIZE);
480 if (!base) 481 if (!base)
481 return -ENOMEM; 482 return -ENOMEM;
482 483
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e00d200df3db..c65992df458d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -278,6 +278,6 @@ void intel_panel_setup_backlight(struct drm_device *dev)
278{ 278{
279 struct drm_i915_private *dev_priv = dev->dev_private; 279 struct drm_i915_private *dev_priv = dev->dev_private;
280 280
281 dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 281 dev_priv->backlight_level = intel_panel_get_backlight(dev);
282 dev_priv->backlight_enabled = dev_priv->backlight_level != 0; 282 dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
283} 283}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 03e337072517..6218fa97aa1e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36 36
37static inline int ring_space(struct intel_ring_buffer *ring)
38{
39 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40 if (space < 0)
41 space += ring->size;
42 return space;
43}
44
37static u32 i915_gem_get_seqno(struct drm_device *dev) 45static u32 i915_gem_get_seqno(struct drm_device *dev)
38{ 46{
39 drm_i915_private_t *dev_priv = dev->dev_private; 47 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
204 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 212 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
205 i915_kernel_lost_context(ring->dev); 213 i915_kernel_lost_context(ring->dev);
206 else { 214 else {
207 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 215 ring->head = I915_READ_HEAD(ring);
208 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 216 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
209 ring->space = ring->head - (ring->tail + 8); 217 ring->space = ring_space(ring);
210 if (ring->space < 0)
211 ring->space += ring->size;
212 } 218 }
213 219
214 return 0; 220 return 0;
@@ -921,7 +927,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
921 } 927 }
922 928
923 ring->tail = 0; 929 ring->tail = 0;
924 ring->space = ring->head - 8; 930 ring->space = ring_space(ring);
925 931
926 return 0; 932 return 0;
927} 933}
@@ -933,20 +939,22 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
933 unsigned long end; 939 unsigned long end;
934 u32 head; 940 u32 head;
935 941
942 /* If the reported head position has wrapped or hasn't advanced,
943 * fallback to the slow and accurate path.
944 */
945 head = intel_read_status_page(ring, 4);
946 if (head > ring->head) {
947 ring->head = head;
948 ring->space = ring_space(ring);
949 if (ring->space >= n)
950 return 0;
951 }
952
936 trace_i915_ring_wait_begin (dev); 953 trace_i915_ring_wait_begin (dev);
937 end = jiffies + 3 * HZ; 954 end = jiffies + 3 * HZ;
938 do { 955 do {
939 /* If the reported head position has wrapped or hasn't advanced, 956 ring->head = I915_READ_HEAD(ring);
940 * fallback to the slow and accurate path. 957 ring->space = ring_space(ring);
941 */
942 head = intel_read_status_page(ring, 4);
943 if (head < ring->actual_head)
944 head = I915_READ_HEAD(ring);
945 ring->actual_head = head;
946 ring->head = head & HEAD_ADDR;
947 ring->space = ring->head - (ring->tail + 8);
948 if (ring->space < 0)
949 ring->space += ring->size;
950 if (ring->space >= n) { 958 if (ring->space >= n) {
951 trace_i915_ring_wait_end(dev); 959 trace_i915_ring_wait_end(dev);
952 return 0; 960 return 0;
@@ -1291,6 +1299,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1291 return intel_init_ring_buffer(dev, ring); 1299 return intel_init_ring_buffer(dev, ring);
1292} 1300}
1293 1301
1302int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1303{
1304 drm_i915_private_t *dev_priv = dev->dev_private;
1305 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1306
1307 *ring = render_ring;
1308 if (INTEL_INFO(dev)->gen >= 6) {
1309 ring->add_request = gen6_add_request;
1310 ring->irq_get = gen6_render_ring_get_irq;
1311 ring->irq_put = gen6_render_ring_put_irq;
1312 } else if (IS_GEN5(dev)) {
1313 ring->add_request = pc_render_add_request;
1314 ring->get_seqno = pc_render_get_seqno;
1315 }
1316
1317 ring->dev = dev;
1318 INIT_LIST_HEAD(&ring->active_list);
1319 INIT_LIST_HEAD(&ring->request_list);
1320 INIT_LIST_HEAD(&ring->gpu_write_list);
1321
1322 ring->size = size;
1323 ring->effective_size = ring->size;
1324 if (IS_I830(ring->dev))
1325 ring->effective_size -= 128;
1326
1327 ring->map.offset = start;
1328 ring->map.size = size;
1329 ring->map.type = 0;
1330 ring->map.flags = 0;
1331 ring->map.mtrr = 0;
1332
1333 drm_core_ioremap_wc(&ring->map, dev);
1334 if (ring->map.handle == NULL) {
1335 DRM_ERROR("can not ioremap virtual address for"
1336 " ring buffer\n");
1337 return -ENOMEM;
1338 }
1339
1340 ring->virtual_start = (void __force __iomem *)ring->map.handle;
1341 return 0;
1342}
1343
1294int intel_init_bsd_ring_buffer(struct drm_device *dev) 1344int intel_init_bsd_ring_buffer(struct drm_device *dev)
1295{ 1345{
1296 drm_i915_private_t *dev_priv = dev->dev_private; 1346 drm_i915_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index be9087e4c9be..6d6fde85a636 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -47,7 +47,6 @@ struct intel_ring_buffer {
47 struct drm_device *dev; 47 struct drm_device *dev;
48 struct drm_i915_gem_object *obj; 48 struct drm_i915_gem_object *obj;
49 49
50 u32 actual_head;
51 u32 head; 50 u32 head;
52 u32 tail; 51 u32 tail;
53 int space; 52 int space;
@@ -167,4 +166,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
167u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 166u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
168void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 167void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
169 168
169/* DRI warts */
170int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
171
170#endif /* _INTEL_RINGBUFFER_H_ */ 172#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 45cd37652a37..6a09c1413d60 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -473,20 +473,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
473 return false; 473 return false;
474 } 474 }
475 475
476 i = 3;
477 while (status == SDVO_CMD_STATUS_PENDING && i--) {
478 if (!intel_sdvo_read_byte(intel_sdvo,
479 SDVO_I2C_CMD_STATUS,
480 &status))
481 return false;
482 }
483 if (status != SDVO_CMD_STATUS_SUCCESS) {
484 DRM_DEBUG_KMS("command returns response %s [%d]\n",
485 status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
486 status);
487 return false;
488 }
489
490 return true; 476 return true;
491} 477}
492 478
@@ -497,6 +483,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
497 u8 status; 483 u8 status;
498 int i; 484 int i;
499 485
486 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
487
500 /* 488 /*
501 * The documentation states that all commands will be 489 * The documentation states that all commands will be
502 * processed within 15µs, and that we need only poll 490 * processed within 15µs, and that we need only poll
@@ -505,14 +493,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
505 * 493 *
506 * Check 5 times in case the hardware failed to read the docs. 494 * Check 5 times in case the hardware failed to read the docs.
507 */ 495 */
508 do { 496 if (!intel_sdvo_read_byte(intel_sdvo,
497 SDVO_I2C_CMD_STATUS,
498 &status))
499 goto log_fail;
500
501 while (status == SDVO_CMD_STATUS_PENDING && retry--) {
502 udelay(15);
509 if (!intel_sdvo_read_byte(intel_sdvo, 503 if (!intel_sdvo_read_byte(intel_sdvo,
510 SDVO_I2C_CMD_STATUS, 504 SDVO_I2C_CMD_STATUS,
511 &status)) 505 &status))
512 return false; 506 goto log_fail;
513 } while (status == SDVO_CMD_STATUS_PENDING && --retry); 507 }
514 508
515 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
516 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 509 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
517 DRM_LOG_KMS("(%s)", cmd_status_names[status]); 510 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
518 else 511 else
@@ -533,7 +526,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
533 return true; 526 return true;
534 527
535log_fail: 528log_fail:
536 DRM_LOG_KMS("\n"); 529 DRM_LOG_KMS("... failed\n");
537 return false; 530 return false;
538} 531}
539 532
@@ -550,6 +543,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
550static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, 543static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
551 u8 ddc_bus) 544 u8 ddc_bus)
552{ 545{
546 /* This must be the immediately preceding write before the i2c xfer */
553 return intel_sdvo_write_cmd(intel_sdvo, 547 return intel_sdvo_write_cmd(intel_sdvo,
554 SDVO_CMD_SET_CONTROL_BUS_SWITCH, 548 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
555 &ddc_bus, 1); 549 &ddc_bus, 1);
@@ -557,7 +551,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
557 551
558static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) 552static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
559{ 553{
560 return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); 554 if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
555 return false;
556
557 return intel_sdvo_read_response(intel_sdvo, NULL, 0);
561} 558}
562 559
563static bool 560static bool
@@ -859,18 +856,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
859 856
860 intel_dip_infoframe_csum(&avi_if); 857 intel_dip_infoframe_csum(&avi_if);
861 858
862 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, 859 if (!intel_sdvo_set_value(intel_sdvo,
860 SDVO_CMD_SET_HBUF_INDEX,
863 set_buf_index, 2)) 861 set_buf_index, 2))
864 return false; 862 return false;
865 863
866 for (i = 0; i < sizeof(avi_if); i += 8) { 864 for (i = 0; i < sizeof(avi_if); i += 8) {
867 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, 865 if (!intel_sdvo_set_value(intel_sdvo,
866 SDVO_CMD_SET_HBUF_DATA,
868 data, 8)) 867 data, 8))
869 return false; 868 return false;
870 data++; 869 data++;
871 } 870 }
872 871
873 return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, 872 return intel_sdvo_set_value(intel_sdvo,
873 SDVO_CMD_SET_HBUF_TXRATE,
874 &tx_rate, 1); 874 &tx_rate, 1);
875} 875}
876 876
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 21d6c29c2d21..de70959b9ed5 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -8,7 +8,7 @@ config DRM_NOUVEAU
8 select FB_CFB_COPYAREA 8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT 9 select FB_CFB_IMAGEBLIT
10 select FB 10 select FB
11 select FRAMEBUFFER_CONSOLE if !EMBEDDED 11 select FRAMEBUFFER_CONSOLE if !EXPERT
12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT 12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT 13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
14 help 14 help
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2aef5cd3acf5..49e5e99917e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6310,6 +6310,9 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
6310static bool 6310static bool
6311apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) 6311apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6312{ 6312{
6313 struct drm_nouveau_private *dev_priv = dev->dev_private;
6314 struct dcb_table *dcb = &dev_priv->vbios.dcb;
6315
6313 /* Dell Precision M6300 6316 /* Dell Precision M6300
6314 * DCB entry 2: 02025312 00000010 6317 * DCB entry 2: 02025312 00000010
6315 * DCB entry 3: 02026312 00000020 6318 * DCB entry 3: 02026312 00000020
@@ -6327,6 +6330,18 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6327 return false; 6330 return false;
6328 } 6331 }
6329 6332
6333 /* GeForce3 Ti 200
6334 *
6335 * DCB reports an LVDS output that should be TMDS:
6336 * DCB entry 1: f2005014 ffffffff
6337 */
6338 if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
6339 if (*conn == 0xf2005014 && *conf == 0xffffffff) {
6340 fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
6341 return false;
6342 }
6343 }
6344
6330 return true; 6345 return true;
6331} 6346}
6332 6347
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 13bb672a16f4..f658a04eecf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -234,9 +234,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
234 pci_set_power_state(pdev, PCI_D3hot); 234 pci_set_power_state(pdev, PCI_D3hot);
235 } 235 }
236 236
237 acquire_console_sem(); 237 console_lock();
238 nouveau_fbcon_set_suspend(dev, 1); 238 nouveau_fbcon_set_suspend(dev, 1);
239 release_console_sem(); 239 console_unlock();
240 nouveau_fbcon_restore_accel(dev); 240 nouveau_fbcon_restore_accel(dev);
241 return 0; 241 return 0;
242 242
@@ -359,9 +359,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
359 nv_crtc->lut.depth = 0; 359 nv_crtc->lut.depth = 0;
360 } 360 }
361 361
362 acquire_console_sem(); 362 console_lock();
363 nouveau_fbcon_set_suspend(dev, 0); 363 nouveau_fbcon_set_suspend(dev, 0);
364 release_console_sem(); 364 console_unlock();
365 365
366 nouveau_fbcon_zfill_all(dev); 366 nouveau_fbcon_zfill_all(dev);
367 367
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 46e32573b3a3..9821fcacc3d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -160,6 +160,7 @@ enum nouveau_flags {
160#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 160#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
161#define NVOBJ_FLAG_ZERO_FREE (1 << 2) 161#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
162#define NVOBJ_FLAG_VM (1 << 3) 162#define NVOBJ_FLAG_VM (1 << 3)
163#define NVOBJ_FLAG_VM_USER (1 << 4)
163 164
164#define NVOBJ_CINST_GLOBAL 0xdeadbeef 165#define NVOBJ_CINST_GLOBAL 0xdeadbeef
165 166
@@ -847,9 +848,6 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
847 struct nouveau_fence *fence); 848 struct nouveau_fence *fence);
848extern const struct ttm_mem_type_manager_func nouveau_vram_manager; 849extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
849 850
850/* nvc0_vram.c */
851extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
852
853/* nouveau_notifier.c */ 851/* nouveau_notifier.c */
854extern int nouveau_notifier_init_channel(struct nouveau_channel *); 852extern int nouveau_notifier_init_channel(struct nouveau_channel *);
855extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); 853extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
@@ -1576,6 +1574,20 @@ nv_match_device(struct drm_device *dev, unsigned device,
1576 dev->pdev->subsystem_device == sub_device; 1574 dev->pdev->subsystem_device == sub_device;
1577} 1575}
1578 1576
1577/* returns 1 if device is one of the nv4x using the 0x4497 object class,
1578 * helpful to determine a number of other hardware features
1579 */
1580static inline int
1581nv44_graph_class(struct drm_device *dev)
1582{
1583 struct drm_nouveau_private *dev_priv = dev->dev_private;
1584
1585 if ((dev_priv->chipset & 0xf0) == 0x60)
1586 return 1;
1587
1588 return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
1589}
1590
1579/* memory type/access flags, do not match hardware values */ 1591/* memory type/access flags, do not match hardware values */
1580#define NV_MEM_ACCESS_RO 1 1592#define NV_MEM_ACCESS_RO 1
1581#define NV_MEM_ACCESS_WO 2 1593#define NV_MEM_ACCESS_WO 2
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a26d04740c88..60769d2f9a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,13 +352,14 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
352 FBINFO_HWACCEL_IMAGEBLIT; 352 FBINFO_HWACCEL_IMAGEBLIT;
353 info->flags |= FBINFO_CAN_FORCE_OUTPUT; 353 info->flags |= FBINFO_CAN_FORCE_OUTPUT;
354 info->fbops = &nouveau_fbcon_sw_ops; 354 info->fbops = &nouveau_fbcon_sw_ops;
355 info->fix.smem_start = dev->mode_config.fb_base + 355 info->fix.smem_start = nvbo->bo.mem.bus.base +
356 (nvbo->bo.mem.start << PAGE_SHIFT); 356 nvbo->bo.mem.bus.offset;
357 info->fix.smem_len = size; 357 info->fix.smem_len = size;
358 358
359 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); 359 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
360 info->screen_size = size; 360 info->screen_size = size;
361 361
362 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
362 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); 363 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
363 364
364 /* Set aperture base/size for vesafb takeover */ 365 /* Set aperture base/size for vesafb takeover */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 69044eb104bb..26347b7cd872 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -742,30 +742,24 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
742{ 742{
743 struct nouveau_mm *mm = man->priv; 743 struct nouveau_mm *mm = man->priv;
744 struct nouveau_mm_node *r; 744 struct nouveau_mm_node *r;
745 u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; 745 u32 total = 0, free = 0;
746 int i;
747 746
748 mutex_lock(&mm->mutex); 747 mutex_lock(&mm->mutex);
749 list_for_each_entry(r, &mm->nodes, nl_entry) { 748 list_for_each_entry(r, &mm->nodes, nl_entry) {
750 printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n", 749 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
751 prefix, r->free ? "free" : "used", r->type, 750 prefix, r->type, ((u64)r->offset << 12),
752 ((u64)r->offset << 12),
753 (((u64)r->offset + r->length) << 12)); 751 (((u64)r->offset + r->length) << 12));
752
754 total += r->length; 753 total += r->length;
755 ttotal[r->type] += r->length; 754 if (!r->type)
756 if (r->free) 755 free += r->length;
757 tfree[r->type] += r->length;
758 else
759 tused[r->type] += r->length;
760 } 756 }
761 mutex_unlock(&mm->mutex); 757 mutex_unlock(&mm->mutex);
762 758
763 printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12); 759 printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
764 for (i = 0; i < 3; i++) { 760 prefix, (u64)total << 12, (u64)free << 12);
765 printk(KERN_DEBUG "%s type %d: 0x%010llx, " 761 printk(KERN_DEBUG "%s block: 0x%08x\n",
766 "used 0x%010llx, free 0x%010llx\n", prefix, 762 prefix, mm->block_size << 12);
767 i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
768 }
769} 763}
770 764
771const struct ttm_mem_type_manager_func nouveau_vram_manager = { 765const struct ttm_mem_type_manager_func nouveau_vram_manager = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index cdbb11eb701b..8844b50c3e54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
48 48
49 b->offset = a->offset; 49 b->offset = a->offset;
50 b->length = size; 50 b->length = size;
51 b->free = a->free;
52 b->type = a->type; 51 b->type = a->type;
53 a->offset += size; 52 a->offset += size;
54 a->length -= size; 53 a->length -= size;
55 list_add_tail(&b->nl_entry, &a->nl_entry); 54 list_add_tail(&b->nl_entry, &a->nl_entry);
56 if (b->free) 55 if (b->type == 0)
57 list_add_tail(&b->fl_entry, &a->fl_entry); 56 list_add_tail(&b->fl_entry, &a->fl_entry);
58 return b; 57 return b;
59} 58}
60 59
61static struct nouveau_mm_node * 60#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
62nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 61 list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
63{
64 struct nouveau_mm_node *prev, *next;
65
66 /* try to merge with free adjacent entries of same type */
67 prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
68 if (this->nl_entry.prev != &rmm->nodes) {
69 if (prev->free && prev->type == this->type) {
70 prev->length += this->length;
71 region_put(rmm, this);
72 this = prev;
73 }
74 }
75
76 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
77 if (this->nl_entry.next != &rmm->nodes) {
78 if (next->free && next->type == this->type) {
79 next->offset = this->offset;
80 next->length += this->length;
81 region_put(rmm, this);
82 this = next;
83 }
84 }
85
86 return this;
87}
88 62
89void 63void
90nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 64nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
91{ 65{
92 u32 block_s, block_l; 66 struct nouveau_mm_node *prev = node(this, prev);
67 struct nouveau_mm_node *next = node(this, next);
93 68
94 this->free = true;
95 list_add(&this->fl_entry, &rmm->free); 69 list_add(&this->fl_entry, &rmm->free);
96 this = nouveau_mm_merge(rmm, this); 70 this->type = 0;
97
98 /* any entirely free blocks now? we'll want to remove typing
99 * on them now so they can be use for any memory allocation
100 */
101 block_s = roundup(this->offset, rmm->block_size);
102 if (block_s + rmm->block_size > this->offset + this->length)
103 return;
104 71
105 /* split off any still-typed region at the start */ 72 if (prev && prev->type == 0) {
106 if (block_s != this->offset) { 73 prev->length += this->length;
107 if (!region_split(rmm, this, block_s - this->offset)) 74 region_put(rmm, this);
108 return; 75 this = prev;
109 } 76 }
110 77
111 /* split off the soon-to-be-untyped block(s) */ 78 if (next && next->type == 0) {
112 block_l = rounddown(this->length, rmm->block_size); 79 next->offset = this->offset;
113 if (block_l != this->length) { 80 next->length += this->length;
114 this = region_split(rmm, this, block_l); 81 region_put(rmm, this);
115 if (!this)
116 return;
117 } 82 }
118
119 /* mark as having no type, and retry merge with any adjacent
120 * untyped blocks
121 */
122 this->type = 0;
123 nouveau_mm_merge(rmm, this);
124} 83}
125 84
126int 85int
127nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, 86nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
128 u32 align, struct nouveau_mm_node **pnode) 87 u32 align, struct nouveau_mm_node **pnode)
129{ 88{
130 struct nouveau_mm_node *this, *tmp, *next; 89 struct nouveau_mm_node *prev, *this, *next;
131 u32 splitoff, avail, alloc; 90 u32 min = size_nc ? size_nc : size;
132 91 u32 align_mask = align - 1;
133 list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) { 92 u32 splitoff;
134 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); 93 u32 s, e;
135 if (this->nl_entry.next == &rmm->nodes) 94
136 next = NULL; 95 list_for_each_entry(this, &rmm->free, fl_entry) {
137 96 e = this->offset + this->length;
138 /* skip wrongly typed blocks */ 97 s = this->offset;
139 if (this->type && this->type != type) 98
99 prev = node(this, prev);
100 if (prev && prev->type != type)
101 s = roundup(s, rmm->block_size);
102
103 next = node(this, next);
104 if (next && next->type != type)
105 e = rounddown(e, rmm->block_size);
106
107 s = (s + align_mask) & ~align_mask;
108 e &= ~align_mask;
109 if (s > e || e - s < min)
140 continue; 110 continue;
141 111
142 /* account for alignment */ 112 splitoff = s - this->offset;
143 splitoff = this->offset & (align - 1); 113 if (splitoff && !region_split(rmm, this, splitoff))
144 if (splitoff) 114 return -ENOMEM;
145 splitoff = align - splitoff;
146
147 if (this->length <= splitoff)
148 continue;
149
150 /* determine total memory available from this, and
151 * the next block (if appropriate)
152 */
153 avail = this->length;
154 if (next && next->free && (!next->type || next->type == type))
155 avail += next->length;
156
157 avail -= splitoff;
158
159 /* determine allocation size */
160 if (size_nc) {
161 alloc = min(avail, size);
162 alloc = rounddown(alloc, size_nc);
163 if (alloc == 0)
164 continue;
165 } else {
166 alloc = size;
167 if (avail < alloc)
168 continue;
169 }
170
171 /* untyped block, split off a chunk that's a multiple
172 * of block_size and type it
173 */
174 if (!this->type) {
175 u32 block = roundup(alloc + splitoff, rmm->block_size);
176 if (this->length < block)
177 continue;
178
179 this = region_split(rmm, this, block);
180 if (!this)
181 return -ENOMEM;
182
183 this->type = type;
184 }
185
186 /* stealing memory from adjacent block */
187 if (alloc > this->length) {
188 u32 amount = alloc - (this->length - splitoff);
189
190 if (!next->type) {
191 amount = roundup(amount, rmm->block_size);
192
193 next = region_split(rmm, next, amount);
194 if (!next)
195 return -ENOMEM;
196
197 next->type = type;
198 }
199
200 this->length += amount;
201 next->offset += amount;
202 next->length -= amount;
203 if (!next->length) {
204 list_del(&next->nl_entry);
205 list_del(&next->fl_entry);
206 kfree(next);
207 }
208 }
209
210 if (splitoff) {
211 if (!region_split(rmm, this, splitoff))
212 return -ENOMEM;
213 }
214 115
215 this = region_split(rmm, this, alloc); 116 this = region_split(rmm, this, min(size, e - s));
216 if (this == NULL) 117 if (!this)
217 return -ENOMEM; 118 return -ENOMEM;
218 119
219 this->free = false; 120 this->type = type;
220 list_del(&this->fl_entry); 121 list_del(&this->fl_entry);
221 *pnode = this; 122 *pnode = this;
222 return 0; 123 return 0;
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
234 heap = kzalloc(sizeof(*heap), GFP_KERNEL); 135 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
235 if (!heap) 136 if (!heap)
236 return -ENOMEM; 137 return -ENOMEM;
237 heap->free = true;
238 heap->offset = roundup(offset, block); 138 heap->offset = roundup(offset, block);
239 heap->length = rounddown(offset + length, block) - heap->offset; 139 heap->length = rounddown(offset + length, block) - heap->offset;
240 140
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index af3844933036..798eaf39691c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -30,9 +30,7 @@ struct nouveau_mm_node {
30 struct list_head fl_entry; 30 struct list_head fl_entry;
31 struct list_head rl_entry; 31 struct list_head rl_entry;
32 32
33 bool free; 33 u8 type;
34 int type;
35
36 u32 offset; 34 u32 offset;
37 u32 length; 35 u32 length;
38}; 36};
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index fb846a3fef15..f05c0cddfeca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -443,7 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
443 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 443 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
444 444
445 if (pm->hwmon) { 445 if (pm->hwmon) {
446 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); 446 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
447 hwmon_device_unregister(pm->hwmon); 447 hwmon_device_unregister(pm->hwmon);
448 } 448 }
449#endif 449#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 7ecc4adc1e45..8d9968e1cba8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -265,8 +265,8 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
265 struct i2c_board_info info[] = { 265 struct i2c_board_info info[] = {
266 { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 266 { I2C_BOARD_INFO("w83l785ts", 0x2d) },
267 { I2C_BOARD_INFO("w83781d", 0x2d) }, 267 { I2C_BOARD_INFO("w83781d", 0x2d) },
268 { I2C_BOARD_INFO("f75375", 0x2e) },
269 { I2C_BOARD_INFO("adt7473", 0x2e) }, 268 { I2C_BOARD_INFO("adt7473", 0x2e) },
269 { I2C_BOARD_INFO("f75375", 0x2e) },
270 { I2C_BOARD_INFO("lm99", 0x4c) }, 270 { I2C_BOARD_INFO("lm99", 0x4c) },
271 { } 271 { }
272 }; 272 };
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 19ef92a0375a..8870d72388c8 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -451,8 +451,7 @@ nv40_graph_register(struct drm_device *dev)
451 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ 451 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
452 452
453 /* curie */ 453 /* curie */
454 if (dev_priv->chipset >= 0x60 || 454 if (nv44_graph_class(dev))
455 0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
456 NVOBJ_CLASS(dev, 0x4497, GR); 455 NVOBJ_CLASS(dev, 0x4497, GR);
457 else 456 else
458 NVOBJ_CLASS(dev, 0x4097, GR); 457 NVOBJ_CLASS(dev, 0x4097, GR);
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index ce585093264e..f70447d131d7 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -118,17 +118,6 @@
118 */ 118 */
119 119
120static int 120static int
121nv40_graph_4097(struct drm_device *dev)
122{
123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124
125 if ((dev_priv->chipset & 0xf0) == 0x60)
126 return 0;
127
128 return !!(0x0baf & (1 << dev_priv->chipset));
129}
130
131static int
132nv40_graph_vs_count(struct drm_device *dev) 121nv40_graph_vs_count(struct drm_device *dev)
133{ 122{
134 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -219,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
219 gr_def(ctx, 0x4009dc, 0x80000000); 208 gr_def(ctx, 0x4009dc, 0x80000000);
220 } else { 209 } else {
221 cp_ctx(ctx, 0x400840, 20); 210 cp_ctx(ctx, 0x400840, 20);
222 if (!nv40_graph_4097(ctx->dev)) { 211 if (nv44_graph_class(ctx->dev)) {
223 for (i = 0; i < 8; i++) 212 for (i = 0; i < 8; i++)
224 gr_def(ctx, 0x400860 + (i * 4), 0x00000001); 213 gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
225 } 214 }
@@ -228,7 +217,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
228 gr_def(ctx, 0x400888, 0x00000040); 217 gr_def(ctx, 0x400888, 0x00000040);
229 cp_ctx(ctx, 0x400894, 11); 218 cp_ctx(ctx, 0x400894, 11);
230 gr_def(ctx, 0x400894, 0x00000040); 219 gr_def(ctx, 0x400894, 0x00000040);
231 if (nv40_graph_4097(ctx->dev)) { 220 if (!nv44_graph_class(ctx->dev)) {
232 for (i = 0; i < 8; i++) 221 for (i = 0; i < 8; i++)
233 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); 222 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
234 } 223 }
@@ -546,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
546static void 535static void
547nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) 536nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
548{ 537{
549 int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084; 538 int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
550 539
551 cp_out (ctx, 0x300000); 540 cp_out (ctx, 0x300000);
552 cp_lsr (ctx, len - 4); 541 cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
582 } else { 571 } else {
583 b0_offset = 0x1d40/4; /* 2200 */ 572 b0_offset = 0x1d40/4; /* 2200 */
584 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ 573 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
585 vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4; 574 vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
586 } 575 }
587 576
588 cp_lsr(ctx, vs_len * vs_nr + 0x300/4); 577 cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
589 cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029); 578 cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
590 579
591 offset = ctx->ctxvals_pos; 580 offset = ctx->ctxvals_pos;
592 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); 581 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index e4e72c12ab6a..03c0d4c3f355 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -6,27 +6,17 @@
6int 6int
7nv40_mc_init(struct drm_device *dev) 7nv40_mc_init(struct drm_device *dev)
8{ 8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 uint32_t tmp;
11
12 /* Power up everything, resetting each individual unit will 9 /* Power up everything, resetting each individual unit will
13 * be done later if needed. 10 * be done later if needed.
14 */ 11 */
15 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); 12 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
16 13
17 switch (dev_priv->chipset) { 14 if (nv44_graph_class(dev)) {
18 case 0x44: 15 u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
19 case 0x46: /* G72 */
20 case 0x4e:
21 case 0x4c: /* C51_G7X */
22 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
23 nv_wr32(dev, NV40_PMC_1700, tmp); 16 nv_wr32(dev, NV40_PMC_1700, tmp);
24 nv_wr32(dev, NV40_PMC_1704, 0); 17 nv_wr32(dev, NV40_PMC_1704, 0);
25 nv_wr32(dev, NV40_PMC_1708, 0); 18 nv_wr32(dev, NV40_PMC_1708, 0);
26 nv_wr32(dev, NV40_PMC_170C, tmp); 19 nv_wr32(dev, NV40_PMC_170C, tmp);
27 break;
28 default:
29 break;
30 } 20 }
31 21
32 return 0; 22 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 14e24e906ee8..0ea090f4244a 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -283,8 +283,7 @@ nv50_evo_create(struct drm_device *dev)
283 nv50_evo_channel_del(&dev_priv->evo); 283 nv50_evo_channel_del(&dev_priv->evo);
284 return ret; 284 return ret;
285 } 285 }
286 } else 286 } else {
287 if (dev_priv->chipset != 0x50) {
288 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, 287 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
289 0, 0xffffffff, 0x00010000); 288 0, 0xffffffff, 0x00010000);
290 if (ret) { 289 if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 2d7ea75a09d4..37e21d2be95b 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -256,6 +256,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
256 struct drm_device *dev = chan->dev; 256 struct drm_device *dev = chan->dev;
257 struct drm_nouveau_private *dev_priv = dev->dev_private; 257 struct drm_nouveau_private *dev_priv = dev->dev_private;
258 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 258 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
259 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
259 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 260 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
260 unsigned long flags; 261 unsigned long flags;
261 262
@@ -265,6 +266,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
265 return; 266 return;
266 267
267 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 268 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
269 pfifo->reassign(dev, false);
268 pgraph->fifo_access(dev, false); 270 pgraph->fifo_access(dev, false);
269 271
270 if (pgraph->channel(dev) == chan) 272 if (pgraph->channel(dev) == chan)
@@ -275,6 +277,7 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
275 dev_priv->engine.instmem.flush(dev); 277 dev_priv->engine.instmem.flush(dev);
276 278
277 pgraph->fifo_access(dev, true); 279 pgraph->fifo_access(dev, true);
280 pfifo->reassign(dev, true);
278 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 281 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
279 282
280 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 283 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 2e1b1cd19a4b..ea0041810ae3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -332,8 +332,11 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
332 gpuobj->vinst = node->vram->offset; 332 gpuobj->vinst = node->vram->offset;
333 333
334 if (gpuobj->flags & NVOBJ_FLAG_VM) { 334 if (gpuobj->flags & NVOBJ_FLAG_VM) {
335 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, 335 u32 flags = NV_MEM_ACCESS_RW;
336 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, 336 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
337 flags |= NV_MEM_ACCESS_SYS;
338
339 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
337 &node->chan_vma); 340 &node->chan_vma);
338 if (ret) { 341 if (ret) {
339 vram->put(dev, &node->vram); 342 vram->put(dev, &node->vram);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 38e523e10995..459ff08241e5 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -45,11 +45,6 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
45 } 45 }
46 46
47 if (phys & 1) { 47 if (phys & 1) {
48 if (dev_priv->vram_sys_base) {
49 phys += dev_priv->vram_sys_base;
50 phys |= 0x30;
51 }
52
53 if (coverage <= 32 * 1024 * 1024) 48 if (coverage <= 32 * 1024 * 1024)
54 phys |= 0x60; 49 phys |= 0x60;
55 else if (coverage <= 64 * 1024 * 1024) 50 else if (coverage <= 64 * 1024 * 1024)
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 5feacd5d5fa4..eb18a7e89f5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -31,6 +31,7 @@
31#include "nvc0_graph.h" 31#include "nvc0_graph.h"
32 32
33static void nvc0_graph_isr(struct drm_device *); 33static void nvc0_graph_isr(struct drm_device *);
34static void nvc0_runk140_isr(struct drm_device *);
34static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan); 35static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
35 36
36void 37void
@@ -105,7 +106,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
105 if (ret) 106 if (ret)
106 return ret; 107 return ret;
107 108
108 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM, 109 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
110 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
109 &grch->unk418810); 111 &grch->unk418810);
110 if (ret) 112 if (ret)
111 return ret; 113 return ret;
@@ -280,6 +282,7 @@ nvc0_graph_destroy(struct drm_device *dev)
280 return; 282 return;
281 283
282 nouveau_irq_unregister(dev, 12); 284 nouveau_irq_unregister(dev, 12);
285 nouveau_irq_unregister(dev, 25);
283 286
284 nouveau_gpuobj_ref(NULL, &priv->unk4188b8); 287 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
285 nouveau_gpuobj_ref(NULL, &priv->unk4188b4); 288 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
@@ -389,6 +392,7 @@ nvc0_graph_create(struct drm_device *dev)
389 } 392 }
390 393
391 nouveau_irq_register(dev, 12, nvc0_graph_isr); 394 nouveau_irq_register(dev, 12, nvc0_graph_isr);
395 nouveau_irq_register(dev, 25, nvc0_runk140_isr);
392 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ 396 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
393 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ 397 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
394 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ 398 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
@@ -511,8 +515,8 @@ nvc0_graph_init_gpc_1(struct drm_device *dev)
511 nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000); 515 nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
512 nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000); 516 nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
513 nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000); 517 nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
514 nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe); 518 nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
515 nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f); 519 nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
516 } 520 }
517 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff); 521 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
518 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff); 522 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -776,3 +780,19 @@ nvc0_graph_isr(struct drm_device *dev)
776 780
777 nv_wr32(dev, 0x400500, 0x00010001); 781 nv_wr32(dev, 0x400500, 0x00010001);
778} 782}
783
784static void
785nvc0_runk140_isr(struct drm_device *dev)
786{
787 u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
788
789 while (units) {
790 u32 unit = ffs(units) - 1;
791 u32 reg = 0x140000 + unit * 0x2000;
792 u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
793 u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
794
795 NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
796 units &= ~(1 << unit);
797 }
798}
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index b9e68b2d30aa..f880ff776db8 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1830,7 +1830,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1830 1830
1831 for (tp = 0, id = 0; tp < 4; tp++) { 1831 for (tp = 0, id = 0; tp < 4; tp++) {
1832 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 1832 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1833 if (tp <= priv->tp_nr[gpc]) { 1833 if (tp < priv->tp_nr[gpc]) {
1834 nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id); 1834 nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
1835 nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id); 1835 nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
1836 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id); 1836 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 4b9251bb0ff4..e4e83c2caf5b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -48,8 +48,8 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
48 phys >>= 8; 48 phys >>= 8;
49 49
50 phys |= 0x00000001; /* present */ 50 phys |= 0x00000001; /* present */
51// if (vma->access & NV_MEM_ACCESS_SYS) 51 if (vma->access & NV_MEM_ACCESS_SYS)
52// phys |= 0x00000002; 52 phys |= 0x00000002;
53 53
54 phys |= ((u64)target << 32); 54 phys |= ((u64)target << 32);
55 phys |= ((u64)memtype << 36); 55 phys |= ((u64)memtype << 36);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b0ab185b86f6..b1537000a104 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -555,6 +555,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
555 dp_clock = dig_connector->dp_clock; 555 dp_clock = dig_connector->dp_clock;
556 } 556 }
557 } 557 }
558/* this might work properly with the new pll algo */
558#if 0 /* doesn't work properly on some laptops */ 559#if 0 /* doesn't work properly on some laptops */
559 /* use recommended ref_div for ss */ 560 /* use recommended ref_div for ss */
560 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 561 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
@@ -572,6 +573,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
572 adjusted_clock = mode->clock * 2; 573 adjusted_clock = mode->clock * 2;
573 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 574 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
574 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 575 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
576 /* rv515 needs more testing with this option */
577 if (rdev->family != CHIP_RV515) {
578 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
579 pll->flags |= RADEON_PLL_IS_LCD;
580 }
575 } else { 581 } else {
576 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 582 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
577 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 583 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -606,14 +612,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
606 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 612 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
607 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 613 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
608 args.v1.ucEncodeMode = encoder_mode; 614 args.v1.ucEncodeMode = encoder_mode;
609 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 615 if (ss_enabled)
610 if (ss_enabled)
611 args.v1.ucConfig |=
612 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
613 } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
614 args.v1.ucConfig |= 616 args.v1.ucConfig |=
615 ADJUST_DISPLAY_CONFIG_SS_ENABLE; 617 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
616 }
617 618
618 atom_execute_table(rdev->mode_info.atom_context, 619 atom_execute_table(rdev->mode_info.atom_context,
619 index, (uint32_t *)&args); 620 index, (uint32_t *)&args);
@@ -624,12 +625,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
624 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; 625 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
625 args.v3.sInput.ucEncodeMode = encoder_mode; 626 args.v3.sInput.ucEncodeMode = encoder_mode;
626 args.v3.sInput.ucDispPllConfig = 0; 627 args.v3.sInput.ucDispPllConfig = 0;
628 if (ss_enabled)
629 args.v3.sInput.ucDispPllConfig |=
630 DISPPLL_CONFIG_SS_ENABLE;
627 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 631 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
628 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 632 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
629 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 633 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
630 if (ss_enabled)
631 args.v3.sInput.ucDispPllConfig |=
632 DISPPLL_CONFIG_SS_ENABLE;
633 args.v3.sInput.ucDispPllConfig |= 634 args.v3.sInput.ucDispPllConfig |=
634 DISPPLL_CONFIG_COHERENT_MODE; 635 DISPPLL_CONFIG_COHERENT_MODE;
635 /* 16200 or 27000 */ 636 /* 16200 or 27000 */
@@ -649,18 +650,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
649 } 650 }
650 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 651 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
651 if (encoder_mode == ATOM_ENCODER_MODE_DP) { 652 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
652 if (ss_enabled)
653 args.v3.sInput.ucDispPllConfig |=
654 DISPPLL_CONFIG_SS_ENABLE;
655 args.v3.sInput.ucDispPllConfig |= 653 args.v3.sInput.ucDispPllConfig |=
656 DISPPLL_CONFIG_COHERENT_MODE; 654 DISPPLL_CONFIG_COHERENT_MODE;
657 /* 16200 or 27000 */ 655 /* 16200 or 27000 */
658 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); 656 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
659 } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { 657 } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
660 if (ss_enabled)
661 args.v3.sInput.ucDispPllConfig |=
662 DISPPLL_CONFIG_SS_ENABLE;
663 } else {
664 if (mode->clock > 165000) 658 if (mode->clock > 165000)
665 args.v3.sInput.ucDispPllConfig |= 659 args.v3.sInput.ucDispPllConfig |=
666 DISPPLL_CONFIG_DUAL_LINK; 660 DISPPLL_CONFIG_DUAL_LINK;
@@ -963,8 +957,16 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
963 /* adjust pixel clock as needed */ 957 /* adjust pixel clock as needed */
964 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); 958 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
965 959
966 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 960 /* rv515 seems happier with the old algo */
967 &ref_div, &post_div); 961 if (rdev->family == CHIP_RV515)
962 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
963 &ref_div, &post_div);
964 else if (ASIC_IS_AVIVO(rdev))
965 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
966 &ref_div, &post_div);
967 else
968 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
969 &ref_div, &post_div);
968 970
969 atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); 971 atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
970 972
@@ -1006,6 +1008,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
1006 struct radeon_bo *rbo; 1008 struct radeon_bo *rbo;
1007 uint64_t fb_location; 1009 uint64_t fb_location;
1008 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1010 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1011 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
1009 int r; 1012 int r;
1010 1013
1011 /* no fb bound */ 1014 /* no fb bound */
@@ -1057,11 +1060,17 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
1057 case 16: 1060 case 16:
1058 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | 1061 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
1059 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); 1062 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
1063#ifdef __BIG_ENDIAN
1064 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
1065#endif
1060 break; 1066 break;
1061 case 24: 1067 case 24:
1062 case 32: 1068 case 32:
1063 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | 1069 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
1064 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); 1070 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
1071#ifdef __BIG_ENDIAN
1072 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
1073#endif
1065 break; 1074 break;
1066 default: 1075 default:
1067 DRM_ERROR("Unsupported screen depth %d\n", 1076 DRM_ERROR("Unsupported screen depth %d\n",
@@ -1106,6 +1115,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
1106 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1115 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
1107 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); 1116 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
1108 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); 1117 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
1118 WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
1109 1119
1110 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); 1120 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
1111 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); 1121 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1162,6 +1172,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1162 struct drm_framebuffer *target_fb; 1172 struct drm_framebuffer *target_fb;
1163 uint64_t fb_location; 1173 uint64_t fb_location;
1164 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1174 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1175 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
1165 int r; 1176 int r;
1166 1177
1167 /* no fb bound */ 1178 /* no fb bound */
@@ -1215,12 +1226,18 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1215 fb_format = 1226 fb_format =
1216 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | 1227 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
1217 AVIVO_D1GRPH_CONTROL_16BPP_RGB565; 1228 AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
1229#ifdef __BIG_ENDIAN
1230 fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
1231#endif
1218 break; 1232 break;
1219 case 24: 1233 case 24:
1220 case 32: 1234 case 32:
1221 fb_format = 1235 fb_format =
1222 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | 1236 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
1223 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; 1237 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
1238#ifdef __BIG_ENDIAN
1239 fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
1240#endif
1224 break; 1241 break;
1225 default: 1242 default:
1226 DRM_ERROR("Unsupported screen depth %d\n", 1243 DRM_ERROR("Unsupported screen depth %d\n",
@@ -1260,6 +1277,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1260 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + 1277 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
1261 radeon_crtc->crtc_offset, (u32) fb_location); 1278 radeon_crtc->crtc_offset, (u32) fb_location);
1262 WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); 1279 WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
1280 if (rdev->family >= CHIP_R600)
1281 WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
1263 1282
1264 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); 1283 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
1265 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); 1284 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4e7778d44b8d..695de9a38506 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -187,9 +187,9 @@ static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
187int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) 187int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
188{ 188{
189 int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); 189 int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
190 int bw = dp_lanes_for_mode_clock(dpcd, mode_clock); 190 int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);
191 191
192 if ((lanes == 0) || (bw == 0)) 192 if ((lanes == 0) || (dp_clock == 0))
193 return MODE_CLOCK_HIGH; 193 return MODE_CLOCK_HIGH;
194 194
195 return MODE_OK; 195 return MODE_OK;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fe8ebdcdc0e..ffdc8332b76e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -97,26 +97,29 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
97} 97}
98 98
99/* get temperature in millidegrees */ 99/* get temperature in millidegrees */
100u32 evergreen_get_temp(struct radeon_device *rdev) 100int evergreen_get_temp(struct radeon_device *rdev)
101{ 101{
102 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 102 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
103 ASIC_T_SHIFT; 103 ASIC_T_SHIFT;
104 u32 actual_temp = 0; 104 u32 actual_temp = 0;
105 105
106 if ((temp >> 10) & 1) 106 if (temp & 0x400)
107 actual_temp = 0; 107 actual_temp = -256;
108 else if ((temp >> 9) & 1) 108 else if (temp & 0x200)
109 actual_temp = 255; 109 actual_temp = 255;
110 else 110 else if (temp & 0x100) {
111 actual_temp = (temp >> 1) & 0xff; 111 actual_temp = temp & 0x1ff;
112 actual_temp |= ~0x1ff;
113 } else
114 actual_temp = temp & 0xff;
112 115
113 return actual_temp * 1000; 116 return (actual_temp * 1000) / 2;
114} 117}
115 118
116u32 sumo_get_temp(struct radeon_device *rdev) 119int sumo_get_temp(struct radeon_device *rdev)
117{ 120{
118 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; 121 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
119 u32 actual_temp = (temp >> 1) & 0xff; 122 int actual_temp = temp - 49;
120 123
121 return actual_temp * 1000; 124 return actual_temp * 1000;
122} 125}
@@ -1182,6 +1185,18 @@ static void evergreen_mc_program(struct radeon_device *rdev)
1182/* 1185/*
1183 * CP. 1186 * CP.
1184 */ 1187 */
1188void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1189{
1190 /* set to DX10/11 mode */
1191 radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
1192 radeon_ring_write(rdev, 1);
1193 /* FIXME: implement */
1194 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1195 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
1196 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
1197 radeon_ring_write(rdev, ib->length_dw);
1198}
1199
1185 1200
1186static int evergreen_cp_load_microcode(struct radeon_device *rdev) 1201static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1187{ 1202{
@@ -1233,7 +1248,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1233 cp_me = 0xff; 1248 cp_me = 0xff;
1234 WREG32(CP_ME_CNTL, cp_me); 1249 WREG32(CP_ME_CNTL, cp_me);
1235 1250
1236 r = radeon_ring_lock(rdev, evergreen_default_size + 15); 1251 r = radeon_ring_lock(rdev, evergreen_default_size + 19);
1237 if (r) { 1252 if (r) {
1238 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1253 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1239 return r; 1254 return r;
@@ -1266,6 +1281,11 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1266 radeon_ring_write(rdev, 0xffffffff); 1281 radeon_ring_write(rdev, 0xffffffff);
1267 radeon_ring_write(rdev, 0xffffffff); 1282 radeon_ring_write(rdev, 0xffffffff);
1268 1283
1284 radeon_ring_write(rdev, 0xc0026900);
1285 radeon_ring_write(rdev, 0x00000316);
1286 radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1287 radeon_ring_write(rdev, 0x00000010); /* */
1288
1269 radeon_ring_unlock_commit(rdev); 1289 radeon_ring_unlock_commit(rdev);
1270 1290
1271 return 0; 1291 return 0;
@@ -2072,6 +2092,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2072 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); 2092 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2073 2093
2074 WREG32(VGT_GS_VERTEX_REUSE, 16); 2094 WREG32(VGT_GS_VERTEX_REUSE, 16);
2095 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
2075 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2096 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2076 2097
2077 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); 2098 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
@@ -2201,6 +2222,9 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2201 struct evergreen_mc_save save; 2222 struct evergreen_mc_save save;
2202 u32 grbm_reset = 0; 2223 u32 grbm_reset = 0;
2203 2224
2225 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2226 return 0;
2227
2204 dev_info(rdev->dev, "GPU softreset \n"); 2228 dev_info(rdev->dev, "GPU softreset \n");
2205 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2229 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2206 RREG32(GRBM_STATUS)); 2230 RREG32(GRBM_STATUS));
@@ -3002,31 +3026,6 @@ int evergreen_copy_blit(struct radeon_device *rdev,
3002 return 0; 3026 return 0;
3003} 3027}
3004 3028
3005static bool evergreen_card_posted(struct radeon_device *rdev)
3006{
3007 u32 reg;
3008
3009 /* first check CRTCs */
3010 if (rdev->flags & RADEON_IS_IGP)
3011 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
3012 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
3013 else
3014 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
3015 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
3016 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
3017 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
3018 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
3019 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
3020 if (reg & EVERGREEN_CRTC_MASTER_EN)
3021 return true;
3022
3023 /* then check MEM_SIZE, in case the crtcs are off */
3024 if (RREG32(CONFIG_MEMSIZE))
3025 return true;
3026
3027 return false;
3028}
3029
3030/* Plan is to move initialization in that function and use 3029/* Plan is to move initialization in that function and use
3031 * helper function so that radeon_device_init pretty much 3030 * helper function so that radeon_device_init pretty much
3032 * do nothing more than calling asic specific function. This 3031 * do nothing more than calling asic specific function. This
@@ -3063,7 +3062,7 @@ int evergreen_init(struct radeon_device *rdev)
3063 if (radeon_asic_reset(rdev)) 3062 if (radeon_asic_reset(rdev))
3064 dev_warn(rdev->dev, "GPU reset failed !\n"); 3063 dev_warn(rdev->dev, "GPU reset failed !\n");
3065 /* Post card if necessary */ 3064 /* Post card if necessary */
3066 if (!evergreen_card_posted(rdev)) { 3065 if (!radeon_card_posted(rdev)) {
3067 if (!rdev->bios) { 3066 if (!rdev->bios) {
3068 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3067 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3069 return -EINVAL; 3068 return -EINVAL;
@@ -3158,6 +3157,9 @@ static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3158{ 3157{
3159 u32 link_width_cntl, speed_cntl; 3158 u32 link_width_cntl, speed_cntl;
3160 3159
3160 if (radeon_pcie_gen2 == 0)
3161 return;
3162
3161 if (rdev->flags & RADEON_IS_IGP) 3163 if (rdev->flags & RADEON_IS_IGP)
3162 return; 3164 return;
3163 3165
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index b758dc7f2f2c..a1ba4b3053d0 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -232,7 +232,7 @@ draw_auto(struct radeon_device *rdev)
232 232
233} 233}
234 234
235/* emits 30 */ 235/* emits 36 */
236static void 236static void
237set_default_state(struct radeon_device *rdev) 237set_default_state(struct radeon_device *rdev)
238{ 238{
@@ -245,6 +245,8 @@ set_default_state(struct radeon_device *rdev)
245 int num_hs_threads, num_ls_threads; 245 int num_hs_threads, num_ls_threads;
246 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; 246 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
247 int num_hs_stack_entries, num_ls_stack_entries; 247 int num_hs_stack_entries, num_ls_stack_entries;
248 u64 gpu_addr;
249 int dwords;
248 250
249 switch (rdev->family) { 251 switch (rdev->family) {
250 case CHIP_CEDAR: 252 case CHIP_CEDAR:
@@ -497,6 +499,18 @@ set_default_state(struct radeon_device *rdev)
497 radeon_ring_write(rdev, 0x00000000); 499 radeon_ring_write(rdev, 0x00000000);
498 radeon_ring_write(rdev, 0x00000000); 500 radeon_ring_write(rdev, 0x00000000);
499 501
502 /* set to DX10/11 mode */
503 radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
504 radeon_ring_write(rdev, 1);
505
506 /* emit an IB pointing at default state */
507 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
508 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
509 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
510 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
511 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
512 radeon_ring_write(rdev, dwords);
513
500} 514}
501 515
502static inline uint32_t i2f(uint32_t input) 516static inline uint32_t i2f(uint32_t input)
@@ -527,8 +541,10 @@ static inline uint32_t i2f(uint32_t input)
527int evergreen_blit_init(struct radeon_device *rdev) 541int evergreen_blit_init(struct radeon_device *rdev)
528{ 542{
529 u32 obj_size; 543 u32 obj_size;
530 int r; 544 int r, dwords;
531 void *ptr; 545 void *ptr;
546 u32 packet2s[16];
547 int num_packet2s = 0;
532 548
533 /* pin copy shader into vram if already initialized */ 549 /* pin copy shader into vram if already initialized */
534 if (rdev->r600_blit.shader_obj) 550 if (rdev->r600_blit.shader_obj)
@@ -536,8 +552,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
536 552
537 mutex_init(&rdev->r600_blit.mutex); 553 mutex_init(&rdev->r600_blit.mutex);
538 rdev->r600_blit.state_offset = 0; 554 rdev->r600_blit.state_offset = 0;
539 rdev->r600_blit.state_len = 0; 555
540 obj_size = 0; 556 rdev->r600_blit.state_len = evergreen_default_size;
557
558 dwords = rdev->r600_blit.state_len;
559 while (dwords & 0xf) {
560 packet2s[num_packet2s++] = PACKET2(0);
561 dwords++;
562 }
563
564 obj_size = dwords * 4;
565 obj_size = ALIGN(obj_size, 256);
541 566
542 rdev->r600_blit.vs_offset = obj_size; 567 rdev->r600_blit.vs_offset = obj_size;
543 obj_size += evergreen_vs_size * 4; 568 obj_size += evergreen_vs_size * 4;
@@ -567,6 +592,12 @@ int evergreen_blit_init(struct radeon_device *rdev)
567 return r; 592 return r;
568 } 593 }
569 594
595 memcpy_toio(ptr + rdev->r600_blit.state_offset,
596 evergreen_default_state, rdev->r600_blit.state_len * 4);
597
598 if (num_packet2s)
599 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
600 packet2s, num_packet2s * 4);
570 memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); 601 memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
571 memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); 602 memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
572 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 603 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
@@ -652,7 +683,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
652 /* calculate number of loops correctly */ 683 /* calculate number of loops correctly */
653 ring_size = num_loops * dwords_per_loop; 684 ring_size = num_loops * dwords_per_loop;
654 /* set default + shaders */ 685 /* set default + shaders */
655 ring_size += 46; /* shaders + def state */ 686 ring_size += 52; /* shaders + def state */
656 ring_size += 10; /* fence emit for VB IB */ 687 ring_size += 10; /* fence emit for VB IB */
657 ring_size += 5; /* done copy */ 688 ring_size += 5; /* done copy */
658 ring_size += 10; /* fence emit for done copy */ 689 ring_size += 10; /* fence emit for done copy */
@@ -660,7 +691,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
660 if (r) 691 if (r)
661 return r; 692 return r;
662 693
663 set_default_state(rdev); /* 30 */ 694 set_default_state(rdev); /* 36 */
664 set_shaders(rdev); /* 16 */ 695 set_shaders(rdev); /* 16 */
665 return 0; 696 return 0;
666} 697}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 36d32d83d866..afec1aca2a73 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -240,6 +240,7 @@
240#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) 240#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
241#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) 241#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
242#define PA_SC_LINE_STIPPLE 0x28A0C 242#define PA_SC_LINE_STIPPLE 0x28A0C
243#define PA_SU_LINE_STIPPLE_VALUE 0x8A60
243#define PA_SC_LINE_STIPPLE_STATE 0x8B10 244#define PA_SC_LINE_STIPPLE_STATE 0x8B10
244 245
245#define SCRATCH_REG0 0x8500 246#define SCRATCH_REG0 0x8500
@@ -652,6 +653,7 @@
652#define PACKET3_DISPATCH_DIRECT 0x15 653#define PACKET3_DISPATCH_DIRECT 0x15
653#define PACKET3_DISPATCH_INDIRECT 0x16 654#define PACKET3_DISPATCH_INDIRECT 0x16
654#define PACKET3_INDIRECT_BUFFER_END 0x17 655#define PACKET3_INDIRECT_BUFFER_END 0x17
656#define PACKET3_MODE_CONTROL 0x18
655#define PACKET3_SET_PREDICATION 0x20 657#define PACKET3_SET_PREDICATION 0x20
656#define PACKET3_REG_RMW 0x21 658#define PACKET3_REG_RMW 0x21
657#define PACKET3_COND_EXEC 0x22 659#define PACKET3_COND_EXEC 0x22
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f637595b14e1..5f15820efe12 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1031,8 +1031,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1031 WREG32(RADEON_CP_CSQ_MODE, 1031 WREG32(RADEON_CP_CSQ_MODE,
1032 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 1032 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
1033 REG_SET(RADEON_INDIRECT1_START, indirect1_start)); 1033 REG_SET(RADEON_INDIRECT1_START, indirect1_start));
1034 WREG32(0x718, 0); 1034 WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
1035 WREG32(0x744, 0x00004D4D); 1035 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
1036 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); 1036 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
1037 radeon_ring_start(rdev); 1037 radeon_ring_start(rdev);
1038 r = radeon_ring_test(rdev); 1038 r = radeon_ring_test(rdev);
@@ -2086,12 +2086,13 @@ int r100_asic_reset(struct radeon_device *rdev)
2086{ 2086{
2087 struct r100_mc_save save; 2087 struct r100_mc_save save;
2088 u32 status, tmp; 2088 u32 status, tmp;
2089 int ret = 0;
2089 2090
2090 r100_mc_stop(rdev, &save);
2091 status = RREG32(R_000E40_RBBM_STATUS); 2091 status = RREG32(R_000E40_RBBM_STATUS);
2092 if (!G_000E40_GUI_ACTIVE(status)) { 2092 if (!G_000E40_GUI_ACTIVE(status)) {
2093 return 0; 2093 return 0;
2094 } 2094 }
2095 r100_mc_stop(rdev, &save);
2095 status = RREG32(R_000E40_RBBM_STATUS); 2096 status = RREG32(R_000E40_RBBM_STATUS);
2096 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2097 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2097 /* stop CP */ 2098 /* stop CP */
@@ -2131,11 +2132,11 @@ int r100_asic_reset(struct radeon_device *rdev)
2131 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2132 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2132 dev_err(rdev->dev, "failed to reset GPU\n"); 2133 dev_err(rdev->dev, "failed to reset GPU\n");
2133 rdev->gpu_lockup = true; 2134 rdev->gpu_lockup = true;
2134 return -1; 2135 ret = -1;
2135 } 2136 } else
2137 dev_info(rdev->dev, "GPU reset succeed\n");
2136 r100_mc_resume(rdev, &save); 2138 r100_mc_resume(rdev, &save);
2137 dev_info(rdev->dev, "GPU reset succeed\n"); 2139 return ret;
2138 return 0;
2139} 2140}
2140 2141
2141void r100_set_common_regs(struct radeon_device *rdev) 2142void r100_set_common_regs(struct radeon_device *rdev)
@@ -2346,10 +2347,10 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
2346 2347
2347 temp = RREG32(RADEON_CONFIG_CNTL); 2348 temp = RREG32(RADEON_CONFIG_CNTL);
2348 if (state == false) { 2349 if (state == false) {
2349 temp &= ~(1<<8); 2350 temp &= ~RADEON_CFG_VGA_RAM_EN;
2350 temp |= (1<<9); 2351 temp |= RADEON_CFG_VGA_IO_DIS;
2351 } else { 2352 } else {
2352 temp &= ~(1<<9); 2353 temp &= ~RADEON_CFG_VGA_IO_DIS;
2353 } 2354 }
2354 WREG32(RADEON_CONFIG_CNTL, temp); 2355 WREG32(RADEON_CONFIG_CNTL, temp);
2355} 2356}
@@ -3521,7 +3522,7 @@ int r100_ring_test(struct radeon_device *rdev)
3521 if (i < rdev->usec_timeout) { 3522 if (i < rdev->usec_timeout) {
3522 DRM_INFO("ring test succeeded in %d usecs\n", i); 3523 DRM_INFO("ring test succeeded in %d usecs\n", i);
3523 } else { 3524 } else {
3524 DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", 3525 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3525 scratch, tmp); 3526 scratch, tmp);
3526 r = -EINVAL; 3527 r = -EINVAL;
3527 } 3528 }
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fae5e709f270..55fe5ba7def3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,6 +69,9 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
69 mb(); 69 mb();
70} 70}
71 71
72#define R300_PTE_WRITEABLE (1 << 2)
73#define R300_PTE_READABLE (1 << 3)
74
72int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 75int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
73{ 76{
74 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 77 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
@@ -78,7 +81,7 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
78 } 81 }
79 addr = (lower_32_bits(addr) >> 8) | 82 addr = (lower_32_bits(addr) >> 8) |
80 ((upper_32_bits(addr) & 0xff) << 24) | 83 ((upper_32_bits(addr) & 0xff) << 24) |
81 0xc; 84 R300_PTE_WRITEABLE | R300_PTE_READABLE;
82 /* on x86 we want this to be CPU endian, on powerpc 85 /* on x86 we want this to be CPU endian, on powerpc
83 * on powerpc without HW swappers, it'll get swapped on way 86 * on powerpc without HW swappers, it'll get swapped on way
84 * into VRAM - so no need for cpu_to_le32 on VRAM tables */ 87 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
@@ -135,7 +138,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
135 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); 138 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
136 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); 139 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
137 /* Clear error */ 140 /* Clear error */
138 WREG32_PCIE(0x18, 0); 141 WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
139 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); 142 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
140 tmp |= RADEON_PCIE_TX_GART_EN; 143 tmp |= RADEON_PCIE_TX_GART_EN;
141 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 144 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
@@ -405,12 +408,13 @@ int r300_asic_reset(struct radeon_device *rdev)
405{ 408{
406 struct r100_mc_save save; 409 struct r100_mc_save save;
407 u32 status, tmp; 410 u32 status, tmp;
411 int ret = 0;
408 412
409 r100_mc_stop(rdev, &save);
410 status = RREG32(R_000E40_RBBM_STATUS); 413 status = RREG32(R_000E40_RBBM_STATUS);
411 if (!G_000E40_GUI_ACTIVE(status)) { 414 if (!G_000E40_GUI_ACTIVE(status)) {
412 return 0; 415 return 0;
413 } 416 }
417 r100_mc_stop(rdev, &save);
414 status = RREG32(R_000E40_RBBM_STATUS); 418 status = RREG32(R_000E40_RBBM_STATUS);
415 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 419 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
416 /* stop CP */ 420 /* stop CP */
@@ -451,11 +455,11 @@ int r300_asic_reset(struct radeon_device *rdev)
451 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 455 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
452 dev_err(rdev->dev, "failed to reset GPU\n"); 456 dev_err(rdev->dev, "failed to reset GPU\n");
453 rdev->gpu_lockup = true; 457 rdev->gpu_lockup = true;
454 return -1; 458 ret = -1;
455 } 459 } else
460 dev_info(rdev->dev, "GPU reset succeed\n");
456 r100_mc_resume(rdev, &save); 461 r100_mc_resume(rdev, &save);
457 dev_info(rdev->dev, "GPU reset succeed\n"); 462 return ret;
458 return 0;
459} 463}
460 464
461/* 465/*
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c387346f93a9..0b59ed7c7d2c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -96,7 +96,7 @@ void r420_pipes_init(struct radeon_device *rdev)
96 "programming pipes. Bad things might happen.\n"); 96 "programming pipes. Bad things might happen.\n");
97 } 97 }
98 /* get max number of pipes */ 98 /* get max number of pipes */
99 gb_pipe_select = RREG32(0x402C); 99 gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
100 num_pipes = ((gb_pipe_select >> 12) & 3) + 1; 100 num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
101 101
102 /* SE chips have 1 pipe */ 102 /* SE chips have 1 pipe */
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c8677f9e385..2ce80d976568 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -79,8 +79,8 @@ static void r520_gpu_init(struct radeon_device *rdev)
79 WREG32(0x4128, 0xFF); 79 WREG32(0x4128, 0xFF);
80 } 80 }
81 r420_pipes_init(rdev); 81 r420_pipes_init(rdev);
82 gb_pipe_select = RREG32(0x402C); 82 gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
83 tmp = RREG32(0x170C); 83 tmp = RREG32(R300_DST_PIPE_CONFIG);
84 pipe_select_current = (tmp >> 2) & 3; 84 pipe_select_current = (tmp >> 2) & 3;
85 tmp = (1 << pipe_select_current) | 85 tmp = (1 << pipe_select_current) |
86 (((gb_pipe_select >> 8) & 0xF) << 4); 86 (((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6b50716267c0..650672a0f5ad 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,12 +97,16 @@ void r600_irq_disable(struct radeon_device *rdev);
97static void r600_pcie_gen2_enable(struct radeon_device *rdev); 97static void r600_pcie_gen2_enable(struct radeon_device *rdev);
98 98
99/* get temperature in millidegrees */ 99/* get temperature in millidegrees */
100u32 rv6xx_get_temp(struct radeon_device *rdev) 100int rv6xx_get_temp(struct radeon_device *rdev)
101{ 101{
102 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> 102 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
103 ASIC_T_SHIFT; 103 ASIC_T_SHIFT;
104 int actual_temp = temp & 0xff;
104 105
105 return temp * 1000; 106 if (temp & 0x100)
107 actual_temp -= 256;
108
109 return actual_temp * 1000;
106} 110}
107 111
108void r600_pm_get_dynpm_state(struct radeon_device *rdev) 112void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -1287,6 +1291,9 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
1287 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); 1291 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
1288 u32 tmp; 1292 u32 tmp;
1289 1293
1294 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1295 return 0;
1296
1290 dev_info(rdev->dev, "GPU softreset \n"); 1297 dev_info(rdev->dev, "GPU softreset \n");
1291 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", 1298 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1292 RREG32(R_008010_GRBM_STATUS)); 1299 RREG32(R_008010_GRBM_STATUS));
@@ -2358,24 +2365,6 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2358 /* FIXME: implement */ 2365 /* FIXME: implement */
2359} 2366}
2360 2367
2361
2362bool r600_card_posted(struct radeon_device *rdev)
2363{
2364 uint32_t reg;
2365
2366 /* first check CRTCs */
2367 reg = RREG32(D1CRTC_CONTROL) |
2368 RREG32(D2CRTC_CONTROL);
2369 if (reg & CRTC_EN)
2370 return true;
2371
2372 /* then check MEM_SIZE, in case the crtcs are off */
2373 if (RREG32(CONFIG_MEMSIZE))
2374 return true;
2375
2376 return false;
2377}
2378
2379int r600_startup(struct radeon_device *rdev) 2368int r600_startup(struct radeon_device *rdev)
2380{ 2369{
2381 int r; 2370 int r;
@@ -2536,7 +2525,7 @@ int r600_init(struct radeon_device *rdev)
2536 if (r) 2525 if (r)
2537 return r; 2526 return r;
2538 /* Post card if necessary */ 2527 /* Post card if necessary */
2539 if (!r600_card_posted(rdev)) { 2528 if (!radeon_card_posted(rdev)) {
2540 if (!rdev->bios) { 2529 if (!rdev->bios) {
2541 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 2530 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2542 return -EINVAL; 2531 return -EINVAL;
@@ -3658,6 +3647,9 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3658 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 3647 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3659 u16 link_cntl2; 3648 u16 link_cntl2;
3660 3649
3650 if (radeon_pcie_gen2 == 0)
3651 return;
3652
3661 if (rdev->flags & RADEON_IS_IGP) 3653 if (rdev->flags & RADEON_IS_IGP)
3662 return; 3654 return;
3663 3655
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 33cda016b083..f869897c7456 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -81,7 +81,11 @@
81#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 81#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
82#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 82#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724
83 83
84 84#define R600_D1GRPH_SWAP_CONTROL 0x610C
85# define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
86# define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
87# define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
88# define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
85 89
86#define R600_HDP_NONSURFACE_BASE 0x2c04 90#define R600_HDP_NONSURFACE_BASE 0x2c04
87 91
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e9486630a467..56c48b67ef3d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -92,6 +92,7 @@ extern int radeon_tv;
92extern int radeon_audio; 92extern int radeon_audio;
93extern int radeon_disp_priority; 93extern int radeon_disp_priority;
94extern int radeon_hw_i2c; 94extern int radeon_hw_i2c;
95extern int radeon_pcie_gen2;
95 96
96/* 97/*
97 * Copy from radeon_drv.h so we don't have to include both and have conflicting 98 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -178,10 +179,10 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev);
178void radeon_atombios_get_power_modes(struct radeon_device *rdev); 179void radeon_atombios_get_power_modes(struct radeon_device *rdev);
179void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); 180void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
180void rs690_pm_info(struct radeon_device *rdev); 181void rs690_pm_info(struct radeon_device *rdev);
181extern u32 rv6xx_get_temp(struct radeon_device *rdev); 182extern int rv6xx_get_temp(struct radeon_device *rdev);
182extern u32 rv770_get_temp(struct radeon_device *rdev); 183extern int rv770_get_temp(struct radeon_device *rdev);
183extern u32 evergreen_get_temp(struct radeon_device *rdev); 184extern int evergreen_get_temp(struct radeon_device *rdev);
184extern u32 sumo_get_temp(struct radeon_device *rdev); 185extern int sumo_get_temp(struct radeon_device *rdev);
185 186
186/* 187/*
187 * Fences. 188 * Fences.
@@ -811,8 +812,7 @@ struct radeon_pm {
811 fixed20_12 sclk; 812 fixed20_12 sclk;
812 fixed20_12 mclk; 813 fixed20_12 mclk;
813 fixed20_12 needed_bandwidth; 814 fixed20_12 needed_bandwidth;
814 /* XXX: use a define for num power modes */ 815 struct radeon_power_state *power_state;
815 struct radeon_power_state power_state[8];
816 /* number of valid power states */ 816 /* number of valid power states */
817 int num_power_states; 817 int num_power_states;
818 int current_power_state_index; 818 int current_power_state_index;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 3a1b16186224..e75d63b8e21d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -759,7 +759,7 @@ static struct radeon_asic evergreen_asic = {
759 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 759 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
760 .gart_set_page = &rs600_gart_set_page, 760 .gart_set_page = &rs600_gart_set_page,
761 .ring_test = &r600_ring_test, 761 .ring_test = &r600_ring_test,
762 .ring_ib_execute = &r600_ring_ib_execute, 762 .ring_ib_execute = &evergreen_ring_ib_execute,
763 .irq_set = &evergreen_irq_set, 763 .irq_set = &evergreen_irq_set,
764 .irq_process = &evergreen_irq_process, 764 .irq_process = &evergreen_irq_process,
765 .get_vblank_counter = &evergreen_get_vblank_counter, 765 .get_vblank_counter = &evergreen_get_vblank_counter,
@@ -805,7 +805,7 @@ static struct radeon_asic sumo_asic = {
805 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 805 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
806 .gart_set_page = &rs600_gart_set_page, 806 .gart_set_page = &rs600_gart_set_page,
807 .ring_test = &r600_ring_test, 807 .ring_test = &r600_ring_test,
808 .ring_ib_execute = &r600_ring_ib_execute, 808 .ring_ib_execute = &evergreen_ring_ib_execute,
809 .irq_set = &evergreen_irq_set, 809 .irq_set = &evergreen_irq_set,
810 .irq_process = &evergreen_irq_process, 810 .irq_process = &evergreen_irq_process,
811 .get_vblank_counter = &evergreen_get_vblank_counter, 811 .get_vblank_counter = &evergreen_get_vblank_counter,
@@ -848,7 +848,7 @@ static struct radeon_asic btc_asic = {
848 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 848 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
849 .gart_set_page = &rs600_gart_set_page, 849 .gart_set_page = &rs600_gart_set_page,
850 .ring_test = &r600_ring_test, 850 .ring_test = &r600_ring_test,
851 .ring_ib_execute = &r600_ring_ib_execute, 851 .ring_ib_execute = &evergreen_ring_ib_execute,
852 .irq_set = &evergreen_irq_set, 852 .irq_set = &evergreen_irq_set,
853 .irq_process = &evergreen_irq_process, 853 .irq_process = &evergreen_irq_process,
854 .get_vblank_counter = &evergreen_get_vblank_counter, 854 .get_vblank_counter = &evergreen_get_vblank_counter,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e01f07718539..c59bd98a2029 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -355,6 +355,7 @@ int evergreen_resume(struct radeon_device *rdev);
355bool evergreen_gpu_is_lockup(struct radeon_device *rdev); 355bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
356int evergreen_asic_reset(struct radeon_device *rdev); 356int evergreen_asic_reset(struct radeon_device *rdev);
357void evergreen_bandwidth_update(struct radeon_device *rdev); 357void evergreen_bandwidth_update(struct radeon_device *rdev);
358void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
358int evergreen_copy_blit(struct radeon_device *rdev, 359int evergreen_copy_blit(struct radeon_device *rdev,
359 uint64_t src_offset, uint64_t dst_offset, 360 uint64_t src_offset, uint64_t dst_offset,
360 unsigned num_pages, struct radeon_fence *fence); 361 unsigned num_pages, struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1573202a6418..5c1cc7ad9a15 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -387,15 +387,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
387 *line_mux = 0x90; 387 *line_mux = 0x90;
388 } 388 }
389 389
390 /* mac rv630 */ 390 /* mac rv630, rv730, others */
391 if ((dev->pdev->device == 0x9588) && 391 if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
392 (dev->pdev->subsystem_vendor == 0x106b) && 392 (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
393 (dev->pdev->subsystem_device == 0x00a6)) { 393 *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
394 if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && 394 *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
395 (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
396 *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
397 *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
398 }
399 } 395 }
400 396
401 /* ASUS HD 3600 XT board lists the DVI port as HDMI */ 397 /* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -1167,16 +1163,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1167 p1pll->pll_out_min = 64800; 1163 p1pll->pll_out_min = 64800;
1168 else 1164 else
1169 p1pll->pll_out_min = 20000; 1165 p1pll->pll_out_min = 20000;
1170 } else if (p1pll->pll_out_min > 64800) {
1171 /* Limiting the pll output range is a good thing generally as
1172 * it limits the number of possible pll combinations for a given
1173 * frequency presumably to the ones that work best on each card.
1174 * However, certain duallink DVI monitors seem to like
1175 * pll combinations that would be limited by this at least on
1176 * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
1177 * family.
1178 */
1179 p1pll->pll_out_min = 64800;
1180 } 1166 }
1181 1167
1182 p1pll->pll_in_min = 1168 p1pll->pll_in_min =
@@ -1991,6 +1977,9 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1991 num_modes = power_info->info.ucNumOfPowerModeEntries; 1977 num_modes = power_info->info.ucNumOfPowerModeEntries;
1992 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 1978 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1993 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 1979 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
1980 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
1981 if (!rdev->pm.power_state)
1982 return state_index;
1994 /* last mode is usually default, array is low to high */ 1983 /* last mode is usually default, array is low to high */
1995 for (i = 0; i < num_modes; i++) { 1984 for (i = 0; i < num_modes; i++) {
1996 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1985 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
@@ -2342,6 +2331,10 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2342 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2331 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2343 2332
2344 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2333 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2334 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2335 power_info->pplib.ucNumStates, GFP_KERNEL);
2336 if (!rdev->pm.power_state)
2337 return state_index;
2345 /* first mode is usually default, followed by low to high */ 2338 /* first mode is usually default, followed by low to high */
2346 for (i = 0; i < power_info->pplib.ucNumStates; i++) { 2339 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
2347 mode_index = 0; 2340 mode_index = 0;
@@ -2422,6 +2415,10 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2422 non_clock_info_array = (struct NonClockInfoArray *) 2415 non_clock_info_array = (struct NonClockInfoArray *)
2423 (mode_info->atom_context->bios + data_offset + 2416 (mode_info->atom_context->bios + data_offset +
2424 power_info->pplib.usNonClockInfoArrayOffset); 2417 power_info->pplib.usNonClockInfoArrayOffset);
2418 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2419 state_array->ucNumEntries, GFP_KERNEL);
2420 if (!rdev->pm.power_state)
2421 return state_index;
2425 for (i = 0; i < state_array->ucNumEntries; i++) { 2422 for (i = 0; i < state_array->ucNumEntries; i++) {
2426 mode_index = 0; 2423 mode_index = 0;
2427 power_state = (union pplib_power_state *)&state_array->states[i]; 2424 power_state = (union pplib_power_state *)&state_array->states[i];
@@ -2495,19 +2492,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2495 break; 2492 break;
2496 } 2493 }
2497 } else { 2494 } else {
2498 /* add the default mode */ 2495 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2499 rdev->pm.power_state[state_index].type = 2496 if (rdev->pm.power_state) {
2500 POWER_STATE_TYPE_DEFAULT; 2497 /* add the default mode */
2501 rdev->pm.power_state[state_index].num_clock_modes = 1; 2498 rdev->pm.power_state[state_index].type =
2502 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; 2499 POWER_STATE_TYPE_DEFAULT;
2503 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; 2500 rdev->pm.power_state[state_index].num_clock_modes = 1;
2504 rdev->pm.power_state[state_index].default_clock_mode = 2501 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2505 &rdev->pm.power_state[state_index].clock_info[0]; 2502 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2506 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2503 rdev->pm.power_state[state_index].default_clock_mode =
2507 rdev->pm.power_state[state_index].pcie_lanes = 16; 2504 &rdev->pm.power_state[state_index].clock_info[0];
2508 rdev->pm.default_power_state_index = state_index; 2505 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2509 rdev->pm.power_state[state_index].flags = 0; 2506 rdev->pm.power_state[state_index].pcie_lanes = 16;
2510 state_index++; 2507 rdev->pm.default_power_state_index = state_index;
2508 rdev->pm.power_state[state_index].flags = 0;
2509 state_index++;
2510 }
2511 } 2511 }
2512 2512
2513 rdev->pm.num_power_states = state_index; 2513 rdev->pm.num_power_states = state_index;
@@ -2623,7 +2623,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
2623 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; 2623 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
2624 2624
2625 /* tell the bios not to handle mode switching */ 2625 /* tell the bios not to handle mode switching */
2626 bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); 2626 bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
2627 2627
2628 if (rdev->family >= CHIP_R600) { 2628 if (rdev->family >= CHIP_R600) {
2629 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); 2629 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
@@ -2674,10 +2674,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
2674 else 2674 else
2675 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); 2675 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2676 2676
2677 if (lock) 2677 if (lock) {
2678 bios_6_scratch |= ATOM_S6_CRITICAL_STATE; 2678 bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
2679 else 2679 bios_6_scratch &= ~ATOM_S6_ACC_MODE;
2680 } else {
2680 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; 2681 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
2682 bios_6_scratch |= ATOM_S6_ACC_MODE;
2683 }
2681 2684
2682 if (rdev->family >= CHIP_R600) 2685 if (rdev->family >= CHIP_R600)
2683 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); 2686 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 591fcae8f224..d27ef74590cd 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2442,6 +2442,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2442 2442
2443 rdev->pm.default_power_state_index = -1; 2443 rdev->pm.default_power_state_index = -1;
2444 2444
2445 /* allocate 2 power states */
2446 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
2447 if (!rdev->pm.power_state) {
2448 rdev->pm.default_power_state_index = state_index;
2449 rdev->pm.num_power_states = 0;
2450
2451 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2452 rdev->pm.current_clock_mode_index = 0;
2453 return;
2454 }
2455
2445 if (rdev->flags & RADEON_IS_MOBILITY) { 2456 if (rdev->flags & RADEON_IS_MOBILITY) {
2446 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); 2457 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2447 if (offset) { 2458 if (offset) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 26091d602b84..0d478932b1a9 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -891,9 +891,9 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
891 pci_disable_device(dev->pdev); 891 pci_disable_device(dev->pdev);
892 pci_set_power_state(dev->pdev, PCI_D3hot); 892 pci_set_power_state(dev->pdev, PCI_D3hot);
893 } 893 }
894 acquire_console_sem(); 894 console_lock();
895 radeon_fbdev_set_suspend(rdev, 1); 895 radeon_fbdev_set_suspend(rdev, 1);
896 release_console_sem(); 896 console_unlock();
897 return 0; 897 return 0;
898} 898}
899 899
@@ -905,11 +905,11 @@ int radeon_resume_kms(struct drm_device *dev)
905 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 905 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
906 return 0; 906 return 0;
907 907
908 acquire_console_sem(); 908 console_lock();
909 pci_set_power_state(dev->pdev, PCI_D0); 909 pci_set_power_state(dev->pdev, PCI_D0);
910 pci_restore_state(dev->pdev); 910 pci_restore_state(dev->pdev);
911 if (pci_enable_device(dev->pdev)) { 911 if (pci_enable_device(dev->pdev)) {
912 release_console_sem(); 912 console_unlock();
913 return -1; 913 return -1;
914 } 914 }
915 pci_set_master(dev->pdev); 915 pci_set_master(dev->pdev);
@@ -920,7 +920,7 @@ int radeon_resume_kms(struct drm_device *dev)
920 radeon_restore_bios_scratch_regs(rdev); 920 radeon_restore_bios_scratch_regs(rdev);
921 921
922 radeon_fbdev_set_suspend(rdev, 0); 922 radeon_fbdev_set_suspend(rdev, 0);
923 release_console_sem(); 923 console_unlock();
924 924
925 /* reset hpd state */ 925 /* reset hpd state */
926 radeon_hpd_init(rdev); 926 radeon_hpd_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d26dabf878d9..2eff98cfd728 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -780,6 +780,115 @@ static int radeon_ddc_dump(struct drm_connector *connector)
780 return ret; 780 return ret;
781} 781}
782 782
783/* avivo */
784static void avivo_get_fb_div(struct radeon_pll *pll,
785 u32 target_clock,
786 u32 post_div,
787 u32 ref_div,
788 u32 *fb_div,
789 u32 *frac_fb_div)
790{
791 u32 tmp = post_div * ref_div;
792
793 tmp *= target_clock;
794 *fb_div = tmp / pll->reference_freq;
795 *frac_fb_div = tmp % pll->reference_freq;
796}
797
798static u32 avivo_get_post_div(struct radeon_pll *pll,
799 u32 target_clock)
800{
801 u32 vco, post_div, tmp;
802
803 if (pll->flags & RADEON_PLL_USE_POST_DIV)
804 return pll->post_div;
805
806 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
807 if (pll->flags & RADEON_PLL_IS_LCD)
808 vco = pll->lcd_pll_out_min;
809 else
810 vco = pll->pll_out_min;
811 } else {
812 if (pll->flags & RADEON_PLL_IS_LCD)
813 vco = pll->lcd_pll_out_max;
814 else
815 vco = pll->pll_out_max;
816 }
817
818 post_div = vco / target_clock;
819 tmp = vco % target_clock;
820
821 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
822 if (tmp)
823 post_div++;
824 } else {
825 if (!tmp)
826 post_div--;
827 }
828
829 return post_div;
830}
831
832#define MAX_TOLERANCE 10
833
834void radeon_compute_pll_avivo(struct radeon_pll *pll,
835 u32 freq,
836 u32 *dot_clock_p,
837 u32 *fb_div_p,
838 u32 *frac_fb_div_p,
839 u32 *ref_div_p,
840 u32 *post_div_p)
841{
842 u32 target_clock = freq / 10;
843 u32 post_div = avivo_get_post_div(pll, target_clock);
844 u32 ref_div = pll->min_ref_div;
845 u32 fb_div = 0, frac_fb_div = 0, tmp;
846
847 if (pll->flags & RADEON_PLL_USE_REF_DIV)
848 ref_div = pll->reference_div;
849
850 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
851 avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
852 frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
853 if (frac_fb_div >= 5) {
854 frac_fb_div -= 5;
855 frac_fb_div = frac_fb_div / 10;
856 frac_fb_div++;
857 }
858 if (frac_fb_div >= 10) {
859 fb_div++;
860 frac_fb_div = 0;
861 }
862 } else {
863 while (ref_div <= pll->max_ref_div) {
864 avivo_get_fb_div(pll, target_clock, post_div, ref_div,
865 &fb_div, &frac_fb_div);
866 if (frac_fb_div >= (pll->reference_freq / 2))
867 fb_div++;
868 frac_fb_div = 0;
869 tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
870 tmp = (tmp * 10000) / target_clock;
871
872 if (tmp > (10000 + MAX_TOLERANCE))
873 ref_div++;
874 else if (tmp >= (10000 - MAX_TOLERANCE))
875 break;
876 else
877 ref_div++;
878 }
879 }
880
881 *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
882 (ref_div * post_div * 10);
883 *fb_div_p = fb_div;
884 *frac_fb_div_p = frac_fb_div;
885 *ref_div_p = ref_div;
886 *post_div_p = post_div;
887 DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
888 *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
889}
890
891/* pre-avivo */
783static inline uint32_t radeon_div(uint64_t n, uint32_t d) 892static inline uint32_t radeon_div(uint64_t n, uint32_t d)
784{ 893{
785 uint64_t mod; 894 uint64_t mod;
@@ -790,13 +899,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
790 return n; 899 return n;
791} 900}
792 901
793void radeon_compute_pll(struct radeon_pll *pll, 902void radeon_compute_pll_legacy(struct radeon_pll *pll,
794 uint64_t freq, 903 uint64_t freq,
795 uint32_t *dot_clock_p, 904 uint32_t *dot_clock_p,
796 uint32_t *fb_div_p, 905 uint32_t *fb_div_p,
797 uint32_t *frac_fb_div_p, 906 uint32_t *frac_fb_div_p,
798 uint32_t *ref_div_p, 907 uint32_t *ref_div_p,
799 uint32_t *post_div_p) 908 uint32_t *post_div_p)
800{ 909{
801 uint32_t min_ref_div = pll->min_ref_div; 910 uint32_t min_ref_div = pll->min_ref_div;
802 uint32_t max_ref_div = pll->max_ref_div; 911 uint32_t max_ref_div = pll->max_ref_div;
@@ -826,6 +935,9 @@ void radeon_compute_pll(struct radeon_pll *pll,
826 pll_out_max = pll->pll_out_max; 935 pll_out_max = pll->pll_out_max;
827 } 936 }
828 937
938 if (pll_out_min > 64800)
939 pll_out_min = 64800;
940
829 if (pll->flags & RADEON_PLL_USE_REF_DIV) 941 if (pll->flags & RADEON_PLL_USE_REF_DIV)
830 min_ref_div = max_ref_div = pll->reference_div; 942 min_ref_div = max_ref_div = pll->reference_div;
831 else { 943 else {
@@ -849,7 +961,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
849 max_fractional_feed_div = pll->max_frac_feedback_div; 961 max_fractional_feed_div = pll->max_frac_feedback_div;
850 } 962 }
851 963
852 for (post_div = max_post_div; post_div >= min_post_div; --post_div) { 964 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
853 uint32_t ref_div; 965 uint32_t ref_div;
854 966
855 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 967 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
@@ -965,6 +1077,10 @@ void radeon_compute_pll(struct radeon_pll *pll,
965 *frac_fb_div_p = best_frac_feedback_div; 1077 *frac_fb_div_p = best_frac_feedback_div;
966 *ref_div_p = best_ref_div; 1078 *ref_div_p = best_ref_div;
967 *post_div_p = best_post_div; 1079 *post_div_p = best_post_div;
1080 DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
1081 freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
1082 best_ref_div, best_post_div);
1083
968} 1084}
969 1085
970static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 1086static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index be5cb4f28c29..275b26a708d6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,7 +48,7 @@
48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen 48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
49 * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) 49 * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs 50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK 51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
52 */ 52 */
53#define KMS_DRIVER_MAJOR 2 53#define KMS_DRIVER_MAJOR 2
54#define KMS_DRIVER_MINOR 8 54#define KMS_DRIVER_MINOR 8
@@ -104,6 +104,7 @@ int radeon_tv = 1;
104int radeon_audio = 1; 104int radeon_audio = 1;
105int radeon_disp_priority = 0; 105int radeon_disp_priority = 0;
106int radeon_hw_i2c = 0; 106int radeon_hw_i2c = 0;
107int radeon_pcie_gen2 = 0;
107 108
108MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 109MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
109module_param_named(no_wb, radeon_no_wb, int, 0444); 110module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -147,6 +148,9 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444);
147MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); 148MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
148module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); 149module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
149 150
151MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
152module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
153
150static int radeon_suspend(struct drm_device *dev, pm_message_t state) 154static int radeon_suspend(struct drm_device *dev, pm_message_t state)
151{ 155{
152 drm_radeon_private_t *dev_priv = dev->dev_private; 156 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 8fd184286c0b..d4a542247618 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
641 switch (connector->connector_type) { 641 switch (connector->connector_type) {
642 case DRM_MODE_CONNECTOR_DVII: 642 case DRM_MODE_CONNECTOR_DVII:
643 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 643 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
644 if (drm_detect_monitor_audio(radeon_connector->edid)) { 644 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
645 /* fix me */ 645 /* fix me */
646 if (ASIC_IS_DCE4(rdev)) 646 if (ASIC_IS_DCE4(rdev))
647 return ATOM_ENCODER_MODE_DVI; 647 return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
655 case DRM_MODE_CONNECTOR_DVID: 655 case DRM_MODE_CONNECTOR_DVID:
656 case DRM_MODE_CONNECTOR_HDMIA: 656 case DRM_MODE_CONNECTOR_HDMIA:
657 default: 657 default:
658 if (drm_detect_monitor_audio(radeon_connector->edid)) { 658 if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
659 /* fix me */ 659 /* fix me */
660 if (ASIC_IS_DCE4(rdev)) 660 if (ASIC_IS_DCE4(rdev))
661 return ATOM_ENCODER_MODE_DVI; 661 return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
673 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 673 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
674 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 674 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
675 return ATOM_ENCODER_MODE_DP; 675 return ATOM_ENCODER_MODE_DP;
676 else if (drm_detect_monitor_audio(radeon_connector->edid)) { 676 else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
677 /* fix me */ 677 /* fix me */
678 if (ASIC_IS_DCE4(rdev)) 678 if (ASIC_IS_DCE4(rdev))
679 return ATOM_ENCODER_MODE_DVI; 679 return ATOM_ENCODER_MODE_DVI;
@@ -1063,7 +1063,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1063 if (!ASIC_IS_DCE4(rdev)) 1063 if (!ASIC_IS_DCE4(rdev))
1064 return; 1064 return;
1065 1065
1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) || 1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) 1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1068 return; 1068 return;
1069 1069
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ca32e9c1e91d..66324b5bb5ba 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -225,6 +225,8 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
225 225
226 strcpy(info->fix.id, "radeondrmfb"); 226 strcpy(info->fix.id, "radeondrmfb");
227 227
228 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
229
228 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 230 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
229 info->fbops = &radeonfb_ops; 231 info->fbops = &radeonfb_ops;
230 232
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a289646e8aa4..9ec830c77af0 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -110,11 +110,14 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
110 110
111int radeon_irq_kms_init(struct radeon_device *rdev) 111int radeon_irq_kms_init(struct radeon_device *rdev)
112{ 112{
113 int i;
113 int r = 0; 114 int r = 0;
114 115
115 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); 116 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
116 117
117 spin_lock_init(&rdev->irq.sw_lock); 118 spin_lock_init(&rdev->irq.sw_lock);
119 for (i = 0; i < rdev->num_crtc; i++)
120 spin_lock_init(&rdev->irq.pflip_lock[i]);
118 r = drm_vblank_init(rdev->ddev, rdev->num_crtc); 121 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
119 if (r) { 122 if (r) {
120 return r; 123 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 28a53e4a925f..8387d32caaa7 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -201,6 +201,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
201 } 201 }
202 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); 202 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
203 break; 203 break;
204 case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
205 /* return clock value in KHz */
206 value = rdev->clock.spll.reference_freq * 10;
207 break;
204 default: 208 default:
205 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 209 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
206 return -EINVAL; 210 return -EINVAL;
@@ -243,6 +247,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
243 struct radeon_device *rdev = dev->dev_private; 247 struct radeon_device *rdev = dev->dev_private;
244 if (rdev->hyperz_filp == file_priv) 248 if (rdev->hyperz_filp == file_priv)
245 rdev->hyperz_filp = NULL; 249 rdev->hyperz_filp = NULL;
250 if (rdev->cmask_filp == file_priv)
251 rdev->cmask_filp = NULL;
246} 252}
247 253
248/* 254/*
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index ace2e6384d40..cf0638c3b7c7 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
778 DRM_DEBUG_KMS("\n"); 778 DRM_DEBUG_KMS("\n");
779 779
780 if (!use_bios_divs) { 780 if (!use_bios_divs) {
781 radeon_compute_pll(pll, mode->clock, 781 radeon_compute_pll_legacy(pll, mode->clock,
782 &freq, &feedback_div, &frac_fb_div, 782 &freq, &feedback_div, &frac_fb_div,
783 &reference_div, &post_divider); 783 &reference_div, &post_divider);
784 784
785 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 785 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
786 if (post_div->divider == post_divider) 786 if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 12bdeab91c86..6794cdf91f28 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -149,6 +149,7 @@ struct radeon_tmds_pll {
149#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 149#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
150#define RADEON_PLL_USE_POST_DIV (1 << 12) 150#define RADEON_PLL_USE_POST_DIV (1 << 12)
151#define RADEON_PLL_IS_LCD (1 << 13) 151#define RADEON_PLL_IS_LCD (1 << 13)
152#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
152 153
153struct radeon_pll { 154struct radeon_pll {
154 /* reference frequency */ 155 /* reference frequency */
@@ -510,13 +511,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
510 struct radeon_atom_ss *ss, 511 struct radeon_atom_ss *ss,
511 int id, u32 clock); 512 int id, u32 clock);
512 513
513extern void radeon_compute_pll(struct radeon_pll *pll, 514extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
514 uint64_t freq, 515 uint64_t freq,
515 uint32_t *dot_clock_p, 516 uint32_t *dot_clock_p,
516 uint32_t *fb_div_p, 517 uint32_t *fb_div_p,
517 uint32_t *frac_fb_div_p, 518 uint32_t *frac_fb_div_p,
518 uint32_t *ref_div_p, 519 uint32_t *ref_div_p,
519 uint32_t *post_div_p); 520 uint32_t *post_div_p);
521
522extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
523 u32 freq,
524 u32 *dot_clock_p,
525 u32 *fb_div_p,
526 u32 *frac_fb_div_p,
527 u32 *ref_div_p,
528 u32 *post_div_p);
520 529
521extern void radeon_setup_encoder_clones(struct drm_device *dev); 530extern void radeon_setup_encoder_clones(struct drm_device *dev);
522 531
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3b1b2bf9cdd5..2aed03bde4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -430,7 +430,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
430{ 430{
431 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 431 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
432 struct radeon_device *rdev = ddev->dev_private; 432 struct radeon_device *rdev = ddev->dev_private;
433 u32 temp; 433 int temp;
434 434
435 switch (rdev->pm.int_thermal_type) { 435 switch (rdev->pm.int_thermal_type) {
436 case THERMAL_TYPE_RV6XX: 436 case THERMAL_TYPE_RV6XX:
@@ -646,6 +646,9 @@ void radeon_pm_fini(struct radeon_device *rdev)
646#endif 646#endif
647 } 647 }
648 648
649 if (rdev->pm.power_state)
650 kfree(rdev->pm.power_state);
651
649 radeon_hwmon_fini(rdev); 652 radeon_hwmon_fini(rdev);
650} 653}
651 654
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 3cd4dace57c7..ec93a75369e6 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -375,6 +375,8 @@
375#define RADEON_CONFIG_APER_SIZE 0x0108 375#define RADEON_CONFIG_APER_SIZE 0x0108
376#define RADEON_CONFIG_BONDS 0x00e8 376#define RADEON_CONFIG_BONDS 0x00e8
377#define RADEON_CONFIG_CNTL 0x00e0 377#define RADEON_CONFIG_CNTL 0x00e0
378# define RADEON_CFG_VGA_RAM_EN (1 << 8)
379# define RADEON_CFG_VGA_IO_DIS (1 << 9)
378# define RADEON_CFG_ATI_REV_A11 (0 << 16) 380# define RADEON_CFG_ATI_REV_A11 (0 << 16)
379# define RADEON_CFG_ATI_REV_A12 (1 << 16) 381# define RADEON_CFG_ATI_REV_A12 (1 << 16)
380# define RADEON_CFG_ATI_REV_A13 (2 << 16) 382# define RADEON_CFG_ATI_REV_A13 (2 << 16)
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index ac40fd39d787..9177f9191837 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -439,7 +439,7 @@ evergreen 0x9400
4390x000286EC SPI_COMPUTE_NUM_THREAD_X 4390x000286EC SPI_COMPUTE_NUM_THREAD_X
4400x000286F0 SPI_COMPUTE_NUM_THREAD_Y 4400x000286F0 SPI_COMPUTE_NUM_THREAD_Y
4410x000286F4 SPI_COMPUTE_NUM_THREAD_Z 4410x000286F4 SPI_COMPUTE_NUM_THREAD_Z
4420x000286F8 GDS_ADDR_SIZE 4420x00028724 GDS_ADDR_SIZE
4430x00028780 CB_BLEND0_CONTROL 4430x00028780 CB_BLEND0_CONTROL
4440x00028784 CB_BLEND1_CONTROL 4440x00028784 CB_BLEND1_CONTROL
4450x00028788 CB_BLEND2_CONTROL 4450x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 5512e4e5e636..c76283d9eb3d 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -203,6 +203,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
203 radeon_gart_table_ram_free(rdev); 203 radeon_gart_table_ram_free(rdev);
204} 204}
205 205
206#define RS400_PTE_WRITEABLE (1 << 2)
207#define RS400_PTE_READABLE (1 << 3)
208
206int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 209int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
207{ 210{
208 uint32_t entry; 211 uint32_t entry;
@@ -213,7 +216,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
213 216
214 entry = (lower_32_bits(addr) & PAGE_MASK) | 217 entry = (lower_32_bits(addr) & PAGE_MASK) |
215 ((upper_32_bits(addr) & 0xff) << 4) | 218 ((upper_32_bits(addr) & 0xff) << 4) |
216 0xc; 219 RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
217 entry = cpu_to_le32(entry); 220 entry = cpu_to_le32(entry);
218 rdev->gart.table.ram.ptr[i] = entry; 221 rdev->gart.table.ram.ptr[i] = entry;
219 return 0; 222 return 0;
@@ -226,8 +229,8 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
226 229
227 for (i = 0; i < rdev->usec_timeout; i++) { 230 for (i = 0; i < rdev->usec_timeout; i++) {
228 /* read MC_STATUS */ 231 /* read MC_STATUS */
229 tmp = RREG32(0x0150); 232 tmp = RREG32(RADEON_MC_STATUS);
230 if (tmp & (1 << 2)) { 233 if (tmp & RADEON_MC_IDLE) {
231 return 0; 234 return 0;
232 } 235 }
233 DRM_UDELAY(1); 236 DRM_UDELAY(1);
@@ -241,7 +244,7 @@ void rs400_gpu_init(struct radeon_device *rdev)
241 r420_pipes_init(rdev); 244 r420_pipes_init(rdev);
242 if (rs400_mc_wait_for_idle(rdev)) { 245 if (rs400_mc_wait_for_idle(rdev)) {
243 printk(KERN_WARNING "rs400: Failed to wait MC idle while " 246 printk(KERN_WARNING "rs400: Failed to wait MC idle while "
244 "programming pipes. Bad things might happen. %08x\n", RREG32(0x150)); 247 "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
245 } 248 }
246} 249}
247 250
@@ -300,9 +303,9 @@ static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
300 seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp); 303 seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
301 tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION); 304 tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
302 seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp); 305 seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
303 tmp = RREG32_MC(0x100); 306 tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
304 seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp); 307 seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
305 tmp = RREG32(0x134); 308 tmp = RREG32(RS690_HDP_FB_LOCATION);
306 seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp); 309 seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
307 } else { 310 } else {
308 tmp = RREG32(RADEON_AGP_BASE); 311 tmp = RREG32(RADEON_AGP_BASE);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b4192acaab5f..5afe294ed51f 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -339,16 +339,16 @@ void rs600_bm_disable(struct radeon_device *rdev)
339 339
340int rs600_asic_reset(struct radeon_device *rdev) 340int rs600_asic_reset(struct radeon_device *rdev)
341{ 341{
342 u32 status, tmp;
343
344 struct rv515_mc_save save; 342 struct rv515_mc_save save;
343 u32 status, tmp;
344 int ret = 0;
345 345
346 /* Stops all mc clients */
347 rv515_mc_stop(rdev, &save);
348 status = RREG32(R_000E40_RBBM_STATUS); 346 status = RREG32(R_000E40_RBBM_STATUS);
349 if (!G_000E40_GUI_ACTIVE(status)) { 347 if (!G_000E40_GUI_ACTIVE(status)) {
350 return 0; 348 return 0;
351 } 349 }
350 /* Stops all mc clients */
351 rv515_mc_stop(rdev, &save);
352 status = RREG32(R_000E40_RBBM_STATUS); 352 status = RREG32(R_000E40_RBBM_STATUS);
353 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 353 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
354 /* stop CP */ 354 /* stop CP */
@@ -392,11 +392,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
392 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 392 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
393 dev_err(rdev->dev, "failed to reset GPU\n"); 393 dev_err(rdev->dev, "failed to reset GPU\n");
394 rdev->gpu_lockup = true; 394 rdev->gpu_lockup = true;
395 return -1; 395 ret = -1;
396 } 396 } else
397 dev_info(rdev->dev, "GPU reset succeed\n");
397 rv515_mc_resume(rdev, &save); 398 rv515_mc_resume(rdev, &save);
398 dev_info(rdev->dev, "GPU reset succeed\n"); 399 return ret;
399 return 0;
400} 400}
401 401
402/* 402/*
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 5d569f41f4ae..64b57af93714 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -69,13 +69,13 @@ void rv515_ring_start(struct radeon_device *rdev)
69 ISYNC_CPSCRATCH_IDLEGUI); 69 ISYNC_CPSCRATCH_IDLEGUI);
70 radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); 70 radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
71 radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); 71 radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
72 radeon_ring_write(rdev, PACKET0(0x170C, 0)); 72 radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
73 radeon_ring_write(rdev, 1 << 31); 73 radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
74 radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); 74 radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
75 radeon_ring_write(rdev, 0); 75 radeon_ring_write(rdev, 0);
76 radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); 76 radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
77 radeon_ring_write(rdev, 0); 77 radeon_ring_write(rdev, 0);
78 radeon_ring_write(rdev, PACKET0(0x42C8, 0)); 78 radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
79 radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); 79 radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
80 radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); 80 radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
81 radeon_ring_write(rdev, 0); 81 radeon_ring_write(rdev, 0);
@@ -153,8 +153,8 @@ void rv515_gpu_init(struct radeon_device *rdev)
153 } 153 }
154 rv515_vga_render_disable(rdev); 154 rv515_vga_render_disable(rdev);
155 r420_pipes_init(rdev); 155 r420_pipes_init(rdev);
156 gb_pipe_select = RREG32(0x402C); 156 gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
157 tmp = RREG32(0x170C); 157 tmp = RREG32(R300_DST_PIPE_CONFIG);
158 pipe_select_current = (tmp >> 2) & 3; 158 pipe_select_current = (tmp >> 2) & 3;
159 tmp = (1 << pipe_select_current) | 159 tmp = (1 << pipe_select_current) |
160 (((gb_pipe_select >> 8) & 0xF) << 4); 160 (((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3a264aa3a79a..2211a323db41 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -78,18 +78,23 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
78} 78}
79 79
80/* get temperature in millidegrees */ 80/* get temperature in millidegrees */
81u32 rv770_get_temp(struct radeon_device *rdev) 81int rv770_get_temp(struct radeon_device *rdev)
82{ 82{
83 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 83 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
84 ASIC_T_SHIFT; 84 ASIC_T_SHIFT;
85 u32 actual_temp = 0; 85 int actual_temp;
86 86
87 if ((temp >> 9) & 1) 87 if (temp & 0x400)
88 actual_temp = 0; 88 actual_temp = -256;
89 else 89 else if (temp & 0x200)
90 actual_temp = (temp >> 1) & 0xff; 90 actual_temp = 255;
91 91 else if (temp & 0x100) {
92 return actual_temp * 1000; 92 actual_temp = temp & 0x1ff;
93 actual_temp |= ~0x1ff;
94 } else
95 actual_temp = temp & 0xff;
96
97 return (actual_temp * 1000) / 2;
93} 98}
94 99
95void rv770_pm_misc(struct radeon_device *rdev) 100void rv770_pm_misc(struct radeon_device *rdev)
@@ -1268,7 +1273,7 @@ int rv770_init(struct radeon_device *rdev)
1268 if (r) 1273 if (r)
1269 return r; 1274 return r;
1270 /* Post card if necessary */ 1275 /* Post card if necessary */
1271 if (!r600_card_posted(rdev)) { 1276 if (!radeon_card_posted(rdev)) {
1272 if (!rdev->bios) { 1277 if (!rdev->bios) {
1273 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 1278 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1274 return -EINVAL; 1279 return -EINVAL;
@@ -1372,6 +1377,9 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
1372 u32 link_width_cntl, lanes, speed_cntl, tmp; 1377 u32 link_width_cntl, lanes, speed_cntl, tmp;
1373 u16 link_cntl2; 1378 u16 link_cntl2;
1374 1379
1380 if (radeon_pcie_gen2 == 0)
1381 return;
1382
1375 if (rdev->flags & RADEON_IS_IGP) 1383 if (rdev->flags & RADEON_IS_IGP)
1376 return; 1384 return;
1377 1385
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 0e1edd7311ff..70e60a4bb678 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -1,12 +1,13 @@
1config STUB_POULSBO 1config STUB_POULSBO
2 tristate "Intel GMA500 Stub Driver" 2 tristate "Intel GMA500 Stub Driver"
3 depends on PCI 3 depends on PCI
4 depends on NET # for THERMAL
4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled 5 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 6 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
6 select VIDEO_OUTPUT_CONTROL if ACPI
7 select BACKLIGHT_CLASS_DEVICE if ACPI 7 select BACKLIGHT_CLASS_DEVICE if ACPI
8 select INPUT if ACPI 8 select INPUT if ACPI
9 select ACPI_VIDEO if ACPI 9 select ACPI_VIDEO if ACPI
10 select THERMAL if ACPI
10 help 11 help
11 Choose this option if you have a system that has Intel GMA500 12 Choose this option if you have a system that has Intel GMA500
12 (Poulsbo) integrated graphics. If M is selected, the module will 13 (Poulsbo) integrated graphics. If M is selected, the module will
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 8d0e31a22027..96c83a9a76bb 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,5 +1,5 @@
1config VGA_ARB 1config VGA_ARB
2 bool "VGA Arbitration" if EMBEDDED 2 bool "VGA Arbitration" if EXPERT
3 default y 3 default y
4 depends on PCI 4 depends on PCI
5 help 5 help
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c380c65da417..ace2b1623b21 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -636,7 +636,7 @@ int vga_client_register(struct pci_dev *pdev, void *cookie,
636 void (*irq_set_state)(void *cookie, bool state), 636 void (*irq_set_state)(void *cookie, bool state),
637 unsigned int (*set_vga_decode)(void *cookie, bool decode)) 637 unsigned int (*set_vga_decode)(void *cookie, bool decode))
638{ 638{
639 int ret = -1; 639 int ret = -ENODEV;
640 struct vga_device *vgadev; 640 struct vga_device *vgadev;
641 unsigned long flags; 641 unsigned long flags;
642 642
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 24cca2f69dfc..2560f01c1a63 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -62,9 +62,9 @@ config HID_3M_PCT
62 Support for 3M PCT touch screens. 62 Support for 3M PCT touch screens.
63 63
64config HID_A4TECH 64config HID_A4TECH
65 tristate "A4 tech mice" if EMBEDDED 65 tristate "A4 tech mice" if EXPERT
66 depends on USB_HID 66 depends on USB_HID
67 default !EMBEDDED 67 default !EXPERT
68 ---help--- 68 ---help---
69 Support for A4 tech X5 and WOP-35 / Trust 450L mice. 69 Support for A4 tech X5 and WOP-35 / Trust 450L mice.
70 70
@@ -77,9 +77,9 @@ config HID_ACRUX_FF
77 game controllers. 77 game controllers.
78 78
79config HID_APPLE 79config HID_APPLE
80 tristate "Apple {i,Power,Mac}Books" if EMBEDDED 80 tristate "Apple {i,Power,Mac}Books" if EXPERT
81 depends on (USB_HID || BT_HIDP) 81 depends on (USB_HID || BT_HIDP)
82 default !EMBEDDED 82 default !EXPERT
83 ---help--- 83 ---help---
84 Support for some Apple devices which less or more break 84 Support for some Apple devices which less or more break
85 HID specification. 85 HID specification.
@@ -88,9 +88,9 @@ config HID_APPLE
88 MacBooks, MacBook Pros and Apple Aluminum. 88 MacBooks, MacBook Pros and Apple Aluminum.
89 89
90config HID_BELKIN 90config HID_BELKIN
91 tristate "Belkin Flip KVM and Wireless keyboard" if EMBEDDED 91 tristate "Belkin Flip KVM and Wireless keyboard" if EXPERT
92 depends on USB_HID 92 depends on USB_HID
93 default !EMBEDDED 93 default !EXPERT
94 ---help--- 94 ---help---
95 Support for Belkin Flip KVM and Wireless keyboard. 95 Support for Belkin Flip KVM and Wireless keyboard.
96 96
@@ -101,16 +101,16 @@ config HID_CANDO
101 Support for Cando dual touch panel. 101 Support for Cando dual touch panel.
102 102
103config HID_CHERRY 103config HID_CHERRY
104 tristate "Cherry Cymotion keyboard" if EMBEDDED 104 tristate "Cherry Cymotion keyboard" if EXPERT
105 depends on USB_HID 105 depends on USB_HID
106 default !EMBEDDED 106 default !EXPERT
107 ---help--- 107 ---help---
108 Support for Cherry Cymotion keyboard. 108 Support for Cherry Cymotion keyboard.
109 109
110config HID_CHICONY 110config HID_CHICONY
111 tristate "Chicony Tactical pad" if EMBEDDED 111 tristate "Chicony Tactical pad" if EXPERT
112 depends on USB_HID 112 depends on USB_HID
113 default !EMBEDDED 113 default !EXPERT
114 ---help--- 114 ---help---
115 Support for Chicony Tactical pad. 115 Support for Chicony Tactical pad.
116 116
@@ -130,9 +130,9 @@ config HID_PRODIKEYS
130 and some additional multimedia keys. 130 and some additional multimedia keys.
131 131
132config HID_CYPRESS 132config HID_CYPRESS
133 tristate "Cypress mouse and barcode readers" if EMBEDDED 133 tristate "Cypress mouse and barcode readers" if EXPERT
134 depends on USB_HID 134 depends on USB_HID
135 default !EMBEDDED 135 default !EXPERT
136 ---help--- 136 ---help---
137 Support for cypress mouse and barcode readers. 137 Support for cypress mouse and barcode readers.
138 138
@@ -174,16 +174,16 @@ config HID_ELECOM
174 Support for the ELECOM BM084 (bluetooth mouse). 174 Support for the ELECOM BM084 (bluetooth mouse).
175 175
176config HID_EZKEY 176config HID_EZKEY
177 tristate "Ezkey BTC 8193 keyboard" if EMBEDDED 177 tristate "Ezkey BTC 8193 keyboard" if EXPERT
178 depends on USB_HID 178 depends on USB_HID
179 default !EMBEDDED 179 default !EXPERT
180 ---help--- 180 ---help---
181 Support for Ezkey BTC 8193 keyboard. 181 Support for Ezkey BTC 8193 keyboard.
182 182
183config HID_KYE 183config HID_KYE
184 tristate "Kye/Genius Ergo Mouse" if EMBEDDED 184 tristate "Kye/Genius Ergo Mouse" if EXPERT
185 depends on USB_HID 185 depends on USB_HID
186 default !EMBEDDED 186 default !EXPERT
187 ---help--- 187 ---help---
188 Support for Kye/Genius Ergo Mouse. 188 Support for Kye/Genius Ergo Mouse.
189 189
@@ -212,16 +212,16 @@ config HID_TWINHAN
212 Support for Twinhan IR remote control. 212 Support for Twinhan IR remote control.
213 213
214config HID_KENSINGTON 214config HID_KENSINGTON
215 tristate "Kensington Slimblade Trackball" if EMBEDDED 215 tristate "Kensington Slimblade Trackball" if EXPERT
216 depends on USB_HID 216 depends on USB_HID
217 default !EMBEDDED 217 default !EXPERT
218 ---help--- 218 ---help---
219 Support for Kensington Slimblade Trackball. 219 Support for Kensington Slimblade Trackball.
220 220
221config HID_LOGITECH 221config HID_LOGITECH
222 tristate "Logitech devices" if EMBEDDED 222 tristate "Logitech devices" if EXPERT
223 depends on USB_HID 223 depends on USB_HID
224 default !EMBEDDED 224 default !EXPERT
225 ---help--- 225 ---help---
226 Support for Logitech devices that are not fully compliant with HID standard. 226 Support for Logitech devices that are not fully compliant with HID standard.
227 227
@@ -276,9 +276,9 @@ config HID_MAGICMOUSE
276 Apple Wireless "Magic" Mouse. 276 Apple Wireless "Magic" Mouse.
277 277
278config HID_MICROSOFT 278config HID_MICROSOFT
279 tristate "Microsoft non-fully HID-compliant devices" if EMBEDDED 279 tristate "Microsoft non-fully HID-compliant devices" if EXPERT
280 depends on USB_HID 280 depends on USB_HID
281 default !EMBEDDED 281 default !EXPERT
282 ---help--- 282 ---help---
283 Support for Microsoft devices that are not fully compliant with HID standard. 283 Support for Microsoft devices that are not fully compliant with HID standard.
284 284
@@ -289,9 +289,9 @@ config HID_MOSART
289 Support for MosArt dual-touch panels. 289 Support for MosArt dual-touch panels.
290 290
291config HID_MONTEREY 291config HID_MONTEREY
292 tristate "Monterey Genius KB29E keyboard" if EMBEDDED 292 tristate "Monterey Genius KB29E keyboard" if EXPERT
293 depends on USB_HID 293 depends on USB_HID
294 default !EMBEDDED 294 default !EXPERT
295 ---help--- 295 ---help---
296 Support for Monterey Genius KB29E. 296 Support for Monterey Genius KB29E.
297 297
@@ -365,8 +365,8 @@ config HID_PICOLCD
365 - IR 365 - IR
366 366
367config HID_PICOLCD_FB 367config HID_PICOLCD_FB
368 bool "Framebuffer support" if EMBEDDED 368 bool "Framebuffer support" if EXPERT
369 default !EMBEDDED 369 default !EXPERT
370 depends on HID_PICOLCD 370 depends on HID_PICOLCD
371 depends on HID_PICOLCD=FB || FB=y 371 depends on HID_PICOLCD=FB || FB=y
372 select FB_DEFERRED_IO 372 select FB_DEFERRED_IO
@@ -379,8 +379,8 @@ config HID_PICOLCD_FB
379 frambuffer device. 379 frambuffer device.
380 380
381config HID_PICOLCD_BACKLIGHT 381config HID_PICOLCD_BACKLIGHT
382 bool "Backlight control" if EMBEDDED 382 bool "Backlight control" if EXPERT
383 default !EMBEDDED 383 default !EXPERT
384 depends on HID_PICOLCD 384 depends on HID_PICOLCD
385 depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y 385 depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
386 ---help--- 386 ---help---
@@ -388,16 +388,16 @@ config HID_PICOLCD_BACKLIGHT
388 class. 388 class.
389 389
390config HID_PICOLCD_LCD 390config HID_PICOLCD_LCD
391 bool "Contrast control" if EMBEDDED 391 bool "Contrast control" if EXPERT
392 default !EMBEDDED 392 default !EXPERT
393 depends on HID_PICOLCD 393 depends on HID_PICOLCD
394 depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y 394 depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
395 ---help--- 395 ---help---
396 Provide access to PicoLCD's LCD contrast via lcd class. 396 Provide access to PicoLCD's LCD contrast via lcd class.
397 397
398config HID_PICOLCD_LEDS 398config HID_PICOLCD_LEDS
399 bool "GPO via leds class" if EMBEDDED 399 bool "GPO via leds class" if EXPERT
400 default !EMBEDDED 400 default !EXPERT
401 depends on HID_PICOLCD 401 depends on HID_PICOLCD
402 depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y 402 depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
403 ---help--- 403 ---help---
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 4edb3bef94a6..0f20fd17cf06 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -45,7 +45,7 @@ config USB_HIDDEV
45 If unsure, say Y. 45 If unsure, say Y.
46 46
47menu "USB HID Boot Protocol drivers" 47menu "USB HID Boot Protocol drivers"
48 depends on USB!=n && USB_HID!=y && EMBEDDED 48 depends on USB!=n && USB_HID!=y && EXPERT
49 49
50config USB_KBD 50config USB_KBD
51 tristate "USB HIDBP Keyboard (simple Boot) support" 51 tristate "USB HIDBP Keyboard (simple Boot) support"
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 35f00dae3676..773e484f1646 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -618,8 +618,8 @@ config SENSORS_LM93
618 depends on I2C 618 depends on I2C
619 select HWMON_VID 619 select HWMON_VID
620 help 620 help
621 If you say yes here you get support for National Semiconductor LM93 621 If you say yes here you get support for National Semiconductor LM93,
622 sensor chips. 622 LM94, and compatible sensor chips.
623 623
624 This driver can also be built as a module. If so, the module 624 This driver can also be built as a module. If so, the module
625 will be called lm93. 625 will be called lm93.
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index ce0372f0615e..4c0743660e9c 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -1072,6 +1072,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
1072 node->sda.dev_attr.show = grp->show; 1072 node->sda.dev_attr.show = grp->show;
1073 node->sda.dev_attr.store = grp->store; 1073 node->sda.dev_attr.store = grp->store;
1074 attr = &node->sda.dev_attr.attr; 1074 attr = &node->sda.dev_attr.attr;
1075 sysfs_attr_init(attr);
1075 attr->name = node->name; 1076 attr->name = node->name;
1076 attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0); 1077 attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0);
1077 ret = sysfs_create_file(&pdev->dev.kobj, attr); 1078 ret = sysfs_create_file(&pdev->dev.kobj, attr);
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 2d68cf3c223b..b5e892017e0c 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/dmi.h>
16 17
17#include <acpi/acpi.h> 18#include <acpi/acpi.h>
18#include <acpi/acpixf.h> 19#include <acpi/acpixf.h>
@@ -22,6 +23,21 @@
22 23
23#define ATK_HID "ATK0110" 24#define ATK_HID "ATK0110"
24 25
26static bool new_if;
27module_param(new_if, bool, 0);
28MODULE_PARM_DESC(new_if, "Override detection heuristic and force the use of the new ATK0110 interface");
29
30static const struct dmi_system_id __initconst atk_force_new_if[] = {
31 {
32 /* Old interface has broken MCH temp monitoring */
33 .ident = "Asus Sabertooth X58",
34 .matches = {
35 DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
36 }
37 },
38 { }
39};
40
25/* Minimum time between readings, enforced in order to avoid 41/* Minimum time between readings, enforced in order to avoid
26 * hogging the CPU. 42 * hogging the CPU.
27 */ 43 */
@@ -1302,7 +1318,9 @@ static int atk_probe_if(struct atk_data *data)
1302 * analysis of multiple DSDTs indicates that when both interfaces 1318 * analysis of multiple DSDTs indicates that when both interfaces
1303 * are present the new one (GGRP/GITM) is not functional. 1319 * are present the new one (GGRP/GITM) is not functional.
1304 */ 1320 */
1305 if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle) 1321 if (new_if)
1322 dev_info(dev, "Overriding interface detection\n");
1323 if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle && !new_if)
1306 data->old_interface = true; 1324 data->old_interface = true;
1307 else if (data->enumerate_handle && data->read_handle && 1325 else if (data->enumerate_handle && data->read_handle &&
1308 data->write_handle) 1326 data->write_handle)
@@ -1420,6 +1438,9 @@ static int __init atk0110_init(void)
1420 return -EBUSY; 1438 return -EBUSY;
1421 } 1439 }
1422 1440
1441 if (dmi_check_system(atk_force_new_if))
1442 new_if = true;
1443
1423 ret = acpi_bus_register_driver(&atk_driver); 1444 ret = acpi_bus_register_driver(&atk_driver);
1424 if (ret) 1445 if (ret)
1425 pr_info("acpi_bus_register_driver failed: %d\n", ret); 1446 pr_info("acpi_bus_register_driver failed: %d\n", ret);
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5dea9faa1656..cd2a6e437aec 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -344,7 +344,7 @@ static int emc1403_remove(struct i2c_client *client)
344} 344}
345 345
346static const unsigned short emc1403_address_list[] = { 346static const unsigned short emc1403_address_list[] = {
347 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END 347 0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END
348}; 348};
349 349
350static const struct i2c_device_id emc1403_idtable[] = { 350static const struct i2c_device_id emc1403_idtable[] = {
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 1b674b7d4584..d805e8e57967 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -957,7 +957,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
957 957
958 /* bail if we did not get an IRQ from the bus layer */ 958 /* bail if we did not get an IRQ from the bus layer */
959 if (!dev->irq) { 959 if (!dev->irq) {
960 pr_err("No IRQ. Disabling /dev/freefall\n"); 960 pr_debug("No IRQ. Disabling /dev/freefall\n");
961 goto out; 961 goto out;
962 } 962 }
963 963
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 776aeb3019d2..508cb291f71b 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
98 * value, it uses signed 8-bit values with LSB = 1 degree Celsius. 98 * value, it uses signed 8-bit values with LSB = 1 degree Celsius.
99 * For remote temperature, low and high limits, it uses signed 11-bit values 99 * For remote temperature, low and high limits, it uses signed 11-bit values
100 * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers. 100 * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
101 * For LM64 the actual remote diode temperature is 16 degree Celsius higher
102 * than the register reading. Remote temperature setpoints have to be
103 * adapted accordingly.
101 */ 104 */
102 105
103#define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \ 106#define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \
@@ -165,6 +168,8 @@ struct lm63_data {
165 struct mutex update_lock; 168 struct mutex update_lock;
166 char valid; /* zero until following fields are valid */ 169 char valid; /* zero until following fields are valid */
167 unsigned long last_updated; /* in jiffies */ 170 unsigned long last_updated; /* in jiffies */
171 int kind;
172 int temp2_offset;
168 173
169 /* registers values */ 174 /* registers values */
170 u8 config, config_fan; 175 u8 config, config_fan;
@@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum
247 return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2); 252 return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
248} 253}
249 254
250static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr, 255/*
251 char *buf) 256 * There are 8bit registers for both local(temp1) and remote(temp2) sensor.
257 * For remote sensor registers temp2_offset has to be considered,
258 * for local sensor it must not.
259 * So we need separate 8bit accessors for local and remote sensor.
260 */
261static ssize_t show_local_temp8(struct device *dev,
262 struct device_attribute *devattr,
263 char *buf)
252{ 264{
253 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 265 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
254 struct lm63_data *data = lm63_update_device(dev); 266 struct lm63_data *data = lm63_update_device(dev);
255 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])); 267 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
256} 268}
257 269
258static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy, 270static ssize_t show_remote_temp8(struct device *dev,
259 const char *buf, size_t count) 271 struct device_attribute *devattr,
272 char *buf)
273{
274 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
275 struct lm63_data *data = lm63_update_device(dev);
276 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
277 + data->temp2_offset);
278}
279
280static ssize_t set_local_temp8(struct device *dev,
281 struct device_attribute *dummy,
282 const char *buf, size_t count)
260{ 283{
261 struct i2c_client *client = to_i2c_client(dev); 284 struct i2c_client *client = to_i2c_client(dev);
262 struct lm63_data *data = i2c_get_clientdata(client); 285 struct lm63_data *data = i2c_get_clientdata(client);
@@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
274{ 297{
275 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 298 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
276 struct lm63_data *data = lm63_update_device(dev); 299 struct lm63_data *data = lm63_update_device(dev);
277 return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])); 300 return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
301 + data->temp2_offset);
278} 302}
279 303
280static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, 304static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
@@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
294 int nr = attr->index; 318 int nr = attr->index;
295 319
296 mutex_lock(&data->update_lock); 320 mutex_lock(&data->update_lock);
297 data->temp11[nr] = TEMP11_TO_REG(val); 321 data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
298 i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2], 322 i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
299 data->temp11[nr] >> 8); 323 data->temp11[nr] >> 8);
300 i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1], 324 i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute
310{ 334{
311 struct lm63_data *data = lm63_update_device(dev); 335 struct lm63_data *data = lm63_update_device(dev);
312 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2]) 336 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
337 + data->temp2_offset
313 - TEMP8_FROM_REG(data->temp2_crit_hyst)); 338 - TEMP8_FROM_REG(data->temp2_crit_hyst));
314} 339}
315 340
@@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *
324 long hyst; 349 long hyst;
325 350
326 mutex_lock(&data->update_lock); 351 mutex_lock(&data->update_lock);
327 hyst = TEMP8_FROM_REG(data->temp8[2]) - val; 352 hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
328 i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST, 353 i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
329 HYST_TO_REG(hyst)); 354 HYST_TO_REG(hyst));
330 mutex_unlock(&data->update_lock); 355 mutex_unlock(&data->update_lock);
@@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
355static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1); 380static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
356static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL); 381static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
357 382
358static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0); 383static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
359static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8, 384static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
360 set_temp8, 1); 385 set_local_temp8, 1);
361 386
362static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0); 387static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
363static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11, 388static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
364 set_temp11, 1); 389 set_temp11, 1);
365static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11, 390static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
366 set_temp11, 2); 391 set_temp11, 2);
367static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2); 392/*
393 * On LM63, temp2_crit can be set only once, which should be job
394 * of the bootloader.
395 */
396static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
397 NULL, 2);
368static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst, 398static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
369 set_temp2_crit_hyst); 399 set_temp2_crit_hyst);
370 400
@@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client,
479 data->valid = 0; 509 data->valid = 0;
480 mutex_init(&data->update_lock); 510 mutex_init(&data->update_lock);
481 511
482 /* Initialize the LM63 chip */ 512 /* Set the device type */
513 data->kind = id->driver_data;
514 if (data->kind == lm64)
515 data->temp2_offset = 16000;
516
517 /* Initialize chip */
483 lm63_init_client(new_client); 518 lm63_init_client(new_client);
484 519
485 /* Register sysfs hooks */ 520 /* Register sysfs hooks */
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index c9ed14eba5a6..3b43df418613 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -135,6 +135,11 @@
135#define LM93_MFR_ID 0x73 135#define LM93_MFR_ID 0x73
136#define LM93_MFR_ID_PROTOTYPE 0x72 136#define LM93_MFR_ID_PROTOTYPE 0x72
137 137
138/* LM94 REGISTER VALUES */
139#define LM94_MFR_ID_2 0x7a
140#define LM94_MFR_ID 0x79
141#define LM94_MFR_ID_PROTOTYPE 0x78
142
138/* SMBus capabilities */ 143/* SMBus capabilities */
139#define LM93_SMBUS_FUNC_FULL (I2C_FUNC_SMBUS_BYTE_DATA | \ 144#define LM93_SMBUS_FUNC_FULL (I2C_FUNC_SMBUS_BYTE_DATA | \
140 I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA) 145 I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA)
@@ -2504,6 +2509,7 @@ static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
2504{ 2509{
2505 struct i2c_adapter *adapter = client->adapter; 2510 struct i2c_adapter *adapter = client->adapter;
2506 int mfr, ver; 2511 int mfr, ver;
2512 const char *name;
2507 2513
2508 if (!i2c_check_functionality(adapter, LM93_SMBUS_FUNC_MIN)) 2514 if (!i2c_check_functionality(adapter, LM93_SMBUS_FUNC_MIN))
2509 return -ENODEV; 2515 return -ENODEV;
@@ -2517,13 +2523,23 @@ static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
2517 } 2523 }
2518 2524
2519 ver = lm93_read_byte(client, LM93_REG_VER); 2525 ver = lm93_read_byte(client, LM93_REG_VER);
2520 if (ver != LM93_MFR_ID && ver != LM93_MFR_ID_PROTOTYPE) { 2526 switch (ver) {
2527 case LM93_MFR_ID:
2528 case LM93_MFR_ID_PROTOTYPE:
2529 name = "lm93";
2530 break;
2531 case LM94_MFR_ID_2:
2532 case LM94_MFR_ID:
2533 case LM94_MFR_ID_PROTOTYPE:
2534 name = "lm94";
2535 break;
2536 default:
2521 dev_dbg(&adapter->dev, 2537 dev_dbg(&adapter->dev,
2522 "detect failed, bad version id 0x%02x!\n", ver); 2538 "detect failed, bad version id 0x%02x!\n", ver);
2523 return -ENODEV; 2539 return -ENODEV;
2524 } 2540 }
2525 2541
2526 strlcpy(info->type, "lm93", I2C_NAME_SIZE); 2542 strlcpy(info->type, name, I2C_NAME_SIZE);
2527 dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n", 2543 dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n",
2528 client->name, i2c_adapter_id(client->adapter), 2544 client->name, i2c_adapter_id(client->adapter),
2529 client->addr); 2545 client->addr);
@@ -2602,6 +2618,7 @@ static int lm93_remove(struct i2c_client *client)
2602 2618
2603static const struct i2c_device_id lm93_id[] = { 2619static const struct i2c_device_id lm93_id[] = {
2604 { "lm93", 0 }, 2620 { "lm93", 0 },
2621 { "lm94", 0 },
2605 { } 2622 { }
2606}; 2623};
2607MODULE_DEVICE_TABLE(i2c, lm93_id); 2624MODULE_DEVICE_TABLE(i2c, lm93_id);
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 53fab518b3da..986e5f62debe 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -29,6 +29,7 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/i2c.h> 30#include <linux/i2c.h>
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/platform_device.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
33#include <linux/mutex.h> 34#include <linux/mutex.h>
34#include <linux/slab.h> 35#include <linux/slab.h>
@@ -40,6 +41,7 @@
40 41
41MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); 42MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
42MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver"); 43MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver");
44MODULE_ALIAS("platform:cs5535-smb");
43MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
44 46
45#define MAX_DEVICES 4 47#define MAX_DEVICES 4
@@ -84,10 +86,6 @@ struct scx200_acb_iface {
84 u8 *ptr; 86 u8 *ptr;
85 char needs_reset; 87 char needs_reset;
86 unsigned len; 88 unsigned len;
87
88 /* PCI device info */
89 struct pci_dev *pdev;
90 int bar;
91}; 89};
92 90
93/* Register Definitions */ 91/* Register Definitions */
@@ -391,7 +389,7 @@ static const struct i2c_algorithm scx200_acb_algorithm = {
391static struct scx200_acb_iface *scx200_acb_list; 389static struct scx200_acb_iface *scx200_acb_list;
392static DEFINE_MUTEX(scx200_acb_list_mutex); 390static DEFINE_MUTEX(scx200_acb_list_mutex);
393 391
394static __init int scx200_acb_probe(struct scx200_acb_iface *iface) 392static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
395{ 393{
396 u8 val; 394 u8 val;
397 395
@@ -427,7 +425,7 @@ static __init int scx200_acb_probe(struct scx200_acb_iface *iface)
427 return 0; 425 return 0;
428} 426}
429 427
430static __init struct scx200_acb_iface *scx200_create_iface(const char *text, 428static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
431 struct device *dev, int index) 429 struct device *dev, int index)
432{ 430{
433 struct scx200_acb_iface *iface; 431 struct scx200_acb_iface *iface;
@@ -452,7 +450,7 @@ static __init struct scx200_acb_iface *scx200_create_iface(const char *text,
452 return iface; 450 return iface;
453} 451}
454 452
455static int __init scx200_acb_create(struct scx200_acb_iface *iface) 453static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
456{ 454{
457 struct i2c_adapter *adapter; 455 struct i2c_adapter *adapter;
458 int rc; 456 int rc;
@@ -472,183 +470,145 @@ static int __init scx200_acb_create(struct scx200_acb_iface *iface)
472 return -ENODEV; 470 return -ENODEV;
473 } 471 }
474 472
475 mutex_lock(&scx200_acb_list_mutex); 473 if (!adapter->dev.parent) {
476 iface->next = scx200_acb_list; 474 /* If there's no dev, we're tracking (ISA) ifaces manually */
477 scx200_acb_list = iface; 475 mutex_lock(&scx200_acb_list_mutex);
478 mutex_unlock(&scx200_acb_list_mutex); 476 iface->next = scx200_acb_list;
477 scx200_acb_list = iface;
478 mutex_unlock(&scx200_acb_list_mutex);
479 }
479 480
480 return 0; 481 return 0;
481} 482}
482 483
483static __init int scx200_create_pci(const char *text, struct pci_dev *pdev, 484static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
484 int bar) 485 unsigned long base, int index, struct device *dev)
485{ 486{
486 struct scx200_acb_iface *iface; 487 struct scx200_acb_iface *iface;
487 int rc; 488 int rc;
488 489
489 iface = scx200_create_iface(text, &pdev->dev, 0); 490 iface = scx200_create_iface(text, dev, index);
490 491
491 if (iface == NULL) 492 if (iface == NULL)
492 return -ENOMEM; 493 return NULL;
493
494 iface->pdev = pdev;
495 iface->bar = bar;
496
497 rc = pci_enable_device_io(iface->pdev);
498 if (rc)
499 goto errout_free;
500 494
501 rc = pci_request_region(iface->pdev, iface->bar, iface->adapter.name); 495 if (!request_region(base, 8, iface->adapter.name)) {
502 if (rc) { 496 printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n",
503 printk(KERN_ERR NAME ": can't allocate PCI BAR %d\n", 497 base, base + 8 - 1);
504 iface->bar);
505 goto errout_free; 498 goto errout_free;
506 } 499 }
507 500
508 iface->base = pci_resource_start(iface->pdev, iface->bar); 501 iface->base = base;
509 rc = scx200_acb_create(iface); 502 rc = scx200_acb_create(iface);
510 503
511 if (rc == 0) 504 if (rc == 0)
512 return 0; 505 return iface;
513 506
514 pci_release_region(iface->pdev, iface->bar); 507 release_region(base, 8);
515 pci_dev_put(iface->pdev);
516 errout_free: 508 errout_free:
517 kfree(iface); 509 kfree(iface);
518 return rc; 510 return NULL;
519} 511}
520 512
521static int __init scx200_create_isa(const char *text, unsigned long base, 513static int __devinit scx200_probe(struct platform_device *pdev)
522 int index)
523{ 514{
524 struct scx200_acb_iface *iface; 515 struct scx200_acb_iface *iface;
525 int rc; 516 struct resource *res;
526
527 iface = scx200_create_iface(text, NULL, index);
528
529 if (iface == NULL)
530 return -ENOMEM;
531 517
532 if (!request_region(base, 8, iface->adapter.name)) { 518 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
533 printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n", 519 if (!res) {
534 base, base + 8 - 1); 520 dev_err(&pdev->dev, "can't fetch device resource info\n");
535 rc = -EBUSY; 521 return -ENODEV;
536 goto errout_free;
537 } 522 }
538 523
539 iface->base = base; 524 iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev);
540 rc = scx200_acb_create(iface); 525 if (!iface)
526 return -EIO;
541 527
542 if (rc == 0) 528 dev_info(&pdev->dev, "SCx200 device '%s' registered\n",
543 return 0; 529 iface->adapter.name);
530 platform_set_drvdata(pdev, iface);
544 531
545 release_region(base, 8); 532 return 0;
546 errout_free:
547 kfree(iface);
548 return rc;
549} 533}
550 534
551/* Driver data is an index into the scx200_data array that indicates 535static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface)
552 * the name and the BAR where the I/O address resource is located. ISA 536{
553 * devices are flagged with a bar value of -1 */ 537 i2c_del_adapter(&iface->adapter);
554 538 release_region(iface->base, 8);
555static const struct pci_device_id scx200_pci[] __initconst = { 539 kfree(iface);
556 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE), 540}
557 .driver_data = 0 },
558 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
559 .driver_data = 0 },
560 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA),
561 .driver_data = 1 },
562 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA),
563 .driver_data = 2 },
564 { 0, }
565};
566
567static struct {
568 const char *name;
569 int bar;
570} scx200_data[] = {
571 { "SCx200", -1 },
572 { "CS5535", 0 },
573 { "CS5536", 0 }
574};
575 541
576static __init int scx200_scan_pci(void) 542static int __devexit scx200_remove(struct platform_device *pdev)
577{ 543{
578 int data, dev; 544 struct scx200_acb_iface *iface;
579 int rc = -ENODEV;
580 struct pci_dev *pdev;
581 545
582 for(dev = 0; dev < ARRAY_SIZE(scx200_pci); dev++) { 546 iface = platform_get_drvdata(pdev);
583 pdev = pci_get_device(scx200_pci[dev].vendor, 547 platform_set_drvdata(pdev, NULL);
584 scx200_pci[dev].device, NULL); 548 scx200_cleanup_iface(iface);
585 549
586 if (pdev == NULL) 550 return 0;
587 continue; 551}
588 552
589 data = scx200_pci[dev].driver_data; 553static struct platform_driver scx200_pci_drv = {
554 .driver = {
555 .name = "cs5535-smb",
556 .owner = THIS_MODULE,
557 },
558 .probe = scx200_probe,
559 .remove = __devexit_p(scx200_remove),
560};
590 561
591 /* if .bar is greater or equal to zero, this is a 562static const struct pci_device_id scx200_isa[] __initconst = {
592 * PCI device - otherwise, we assume 563 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
593 that the ports are ISA based 564 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
594 */ 565 { 0, }
566};
595 567
596 if (scx200_data[data].bar >= 0) 568static __init void scx200_scan_isa(void)
597 rc = scx200_create_pci(scx200_data[data].name, pdev, 569{
598 scx200_data[data].bar); 570 int i;
599 else {
600 int i;
601 571
602 pci_dev_put(pdev); 572 if (!pci_dev_present(scx200_isa))
603 for (i = 0; i < MAX_DEVICES; ++i) { 573 return;
604 if (base[i] == 0)
605 continue;
606 574
607 rc = scx200_create_isa(scx200_data[data].name, 575 for (i = 0; i < MAX_DEVICES; ++i) {
608 base[i], 576 if (base[i] == 0)
609 i); 577 continue;
610 }
611 }
612 578
613 break; 579 /* XXX: should we care about failures? */
580 scx200_create_dev("SCx200", base[i], i, NULL);
614 } 581 }
615
616 return rc;
617} 582}
618 583
619static int __init scx200_acb_init(void) 584static int __init scx200_acb_init(void)
620{ 585{
621 int rc;
622
623 pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n"); 586 pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n");
624 587
625 rc = scx200_scan_pci(); 588 /* First scan for ISA-based devices */
589 scx200_scan_isa(); /* XXX: should we care about errors? */
626 590
627 /* If at least one bus was created, init must succeed */ 591 /* If at least one bus was created, init must succeed */
628 if (scx200_acb_list) 592 if (scx200_acb_list)
629 return 0; 593 return 0;
630 return rc; 594
595 /* No ISA devices; register the platform driver for PCI-based devices */
596 return platform_driver_register(&scx200_pci_drv);
631} 597}
632 598
633static void __exit scx200_acb_cleanup(void) 599static void __exit scx200_acb_cleanup(void)
634{ 600{
635 struct scx200_acb_iface *iface; 601 struct scx200_acb_iface *iface;
636 602
603 platform_driver_unregister(&scx200_pci_drv);
604
637 mutex_lock(&scx200_acb_list_mutex); 605 mutex_lock(&scx200_acb_list_mutex);
638 while ((iface = scx200_acb_list) != NULL) { 606 while ((iface = scx200_acb_list) != NULL) {
639 scx200_acb_list = iface->next; 607 scx200_acb_list = iface->next;
640 mutex_unlock(&scx200_acb_list_mutex); 608 mutex_unlock(&scx200_acb_list_mutex);
641 609
642 i2c_del_adapter(&iface->adapter); 610 scx200_cleanup_iface(iface);
643
644 if (iface->pdev) {
645 pci_release_region(iface->pdev, iface->bar);
646 pci_dev_put(iface->pdev);
647 }
648 else
649 release_region(iface->base, 8);
650 611
651 kfree(iface);
652 mutex_lock(&scx200_acb_list_mutex); 612 mutex_lock(&scx200_acb_list_mutex);
653 } 613 }
654 mutex_unlock(&scx200_acb_list_mutex); 614 mutex_unlock(&scx200_acb_list_mutex);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c7db6980e3a3..f0bd5bcdf563 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -196,88 +196,60 @@ static int i2c_device_pm_suspend(struct device *dev)
196{ 196{
197 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 197 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
198 198
199 if (pm) { 199 if (pm)
200 if (pm_runtime_suspended(dev)) 200 return pm_generic_suspend(dev);
201 return 0; 201 else
202 else 202 return i2c_legacy_suspend(dev, PMSG_SUSPEND);
203 return pm->suspend ? pm->suspend(dev) : 0;
204 }
205
206 return i2c_legacy_suspend(dev, PMSG_SUSPEND);
207} 203}
208 204
209static int i2c_device_pm_resume(struct device *dev) 205static int i2c_device_pm_resume(struct device *dev)
210{ 206{
211 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 207 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
212 int ret;
213 208
214 if (pm) 209 if (pm)
215 ret = pm->resume ? pm->resume(dev) : 0; 210 return pm_generic_resume(dev);
216 else 211 else
217 ret = i2c_legacy_resume(dev); 212 return i2c_legacy_resume(dev);
218
219 return ret;
220} 213}
221 214
222static int i2c_device_pm_freeze(struct device *dev) 215static int i2c_device_pm_freeze(struct device *dev)
223{ 216{
224 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 217 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
225 218
226 if (pm) { 219 if (pm)
227 if (pm_runtime_suspended(dev)) 220 return pm_generic_freeze(dev);
228 return 0; 221 else
229 else 222 return i2c_legacy_suspend(dev, PMSG_FREEZE);
230 return pm->freeze ? pm->freeze(dev) : 0;
231 }
232
233 return i2c_legacy_suspend(dev, PMSG_FREEZE);
234} 223}
235 224
236static int i2c_device_pm_thaw(struct device *dev) 225static int i2c_device_pm_thaw(struct device *dev)
237{ 226{
238 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 227 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
239 228
240 if (pm) { 229 if (pm)
241 if (pm_runtime_suspended(dev)) 230 return pm_generic_thaw(dev);
242 return 0; 231 else
243 else 232 return i2c_legacy_resume(dev);
244 return pm->thaw ? pm->thaw(dev) : 0;
245 }
246
247 return i2c_legacy_resume(dev);
248} 233}
249 234
250static int i2c_device_pm_poweroff(struct device *dev) 235static int i2c_device_pm_poweroff(struct device *dev)
251{ 236{
252 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 237 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
253 238
254 if (pm) { 239 if (pm)
255 if (pm_runtime_suspended(dev)) 240 return pm_generic_poweroff(dev);
256 return 0; 241 else
257 else 242 return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
258 return pm->poweroff ? pm->poweroff(dev) : 0;
259 }
260
261 return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
262} 243}
263 244
264static int i2c_device_pm_restore(struct device *dev) 245static int i2c_device_pm_restore(struct device *dev)
265{ 246{
266 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 247 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
267 int ret;
268 248
269 if (pm) 249 if (pm)
270 ret = pm->restore ? pm->restore(dev) : 0; 250 return pm_generic_restore(dev);
271 else 251 else
272 ret = i2c_legacy_resume(dev); 252 return i2c_legacy_resume(dev);
273
274 if (!ret) {
275 pm_runtime_disable(dev);
276 pm_runtime_set_active(dev);
277 pm_runtime_enable(dev);
278 }
279
280 return ret;
281} 253}
282#else /* !CONFIG_PM_SLEEP */ 254#else /* !CONFIG_PM_SLEEP */
283#define i2c_device_pm_suspend NULL 255#define i2c_device_pm_suspend NULL
@@ -1021,6 +993,14 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
1021static int __unregister_client(struct device *dev, void *dummy) 993static int __unregister_client(struct device *dev, void *dummy)
1022{ 994{
1023 struct i2c_client *client = i2c_verify_client(dev); 995 struct i2c_client *client = i2c_verify_client(dev);
996 if (client && strcmp(client->name, "dummy"))
997 i2c_unregister_device(client);
998 return 0;
999}
1000
1001static int __unregister_dummy(struct device *dev, void *dummy)
1002{
1003 struct i2c_client *client = i2c_verify_client(dev);
1024 if (client) 1004 if (client)
1025 i2c_unregister_device(client); 1005 i2c_unregister_device(client);
1026 return 0; 1006 return 0;
@@ -1075,8 +1055,12 @@ int i2c_del_adapter(struct i2c_adapter *adap)
1075 mutex_unlock(&adap->userspace_clients_lock); 1055 mutex_unlock(&adap->userspace_clients_lock);
1076 1056
1077 /* Detach any active clients. This can't fail, thus we do not 1057 /* Detach any active clients. This can't fail, thus we do not
1078 checking the returned value. */ 1058 * check the returned value. This is a two-pass process, because
1059 * we can't remove the dummy devices during the first pass: they
1060 * could have been instantiated by real devices wishing to clean
1061 * them up properly, so we give them a chance to do that first. */
1079 res = device_for_each_child(&adap->dev, NULL, __unregister_client); 1062 res = device_for_each_child(&adap->dev, NULL, __unregister_client);
1063 res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
1080 1064
1081#ifdef CONFIG_I2C_COMPAT 1065#ifdef CONFIG_I2C_COMPAT
1082 class_compat_remove_link(i2c_adapter_compat_class, &adap->dev, 1066 class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
@@ -1140,6 +1124,14 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
1140 if (res) 1124 if (res)
1141 return res; 1125 return res;
1142 1126
1127 /* Drivers should switch to dev_pm_ops instead. */
1128 if (driver->suspend)
1129 pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
1130 driver->driver.name);
1131 if (driver->resume)
1132 pr_warn("i2c-core: driver [%s] using legacy resume method\n",
1133 driver->driver.name);
1134
1143 pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); 1135 pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
1144 1136
1145 INIT_LIST_HEAD(&driver->clients); 1137 INIT_LIST_HEAD(&driver->clients);
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 98ccfeb3f5aa..9827c5e686cb 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -134,7 +134,7 @@ config BLK_DEV_IDECD
134 module will be called ide-cd. 134 module will be called ide-cd.
135 135
136config BLK_DEV_IDECD_VERBOSE_ERRORS 136config BLK_DEV_IDECD_VERBOSE_ERRORS
137 bool "Verbose error logging for IDE/ATAPI CDROM driver" if EMBEDDED 137 bool "Verbose error logging for IDE/ATAPI CDROM driver" if EXPERT
138 depends on BLK_DEV_IDECD 138 depends on BLK_DEV_IDECD
139 default y 139 default y
140 help 140 help
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 56ac09d6c930..1fa091e05690 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -59,6 +59,8 @@
59#include <linux/hrtimer.h> /* ktime_get_real() */ 59#include <linux/hrtimer.h> /* ktime_get_real() */
60#include <trace/events/power.h> 60#include <trace/events/power.h>
61#include <linux/sched.h> 61#include <linux/sched.h>
62#include <linux/notifier.h>
63#include <linux/cpu.h>
62#include <asm/mwait.h> 64#include <asm/mwait.h>
63 65
64#define INTEL_IDLE_VERSION "0.4" 66#define INTEL_IDLE_VERSION "0.4"
@@ -73,6 +75,7 @@ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
73 75
74static unsigned int mwait_substates; 76static unsigned int mwait_substates;
75 77
78#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
76/* Reliable LAPIC Timer States, bit 1 for C1 etc. */ 79/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
77static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ 80static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
78 81
@@ -82,6 +85,14 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
82static struct cpuidle_state *cpuidle_state_table; 85static struct cpuidle_state *cpuidle_state_table;
83 86
84/* 87/*
88 * Set this flag for states where the HW flushes the TLB for us
89 * and so we don't need cross-calls to keep it consistent.
90 * If this flag is set, SW flushes the TLB, so even if the
91 * HW doesn't do the flushing, this flag is safe to use.
92 */
93#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
94
95/*
85 * States are indexed by the cstate number, 96 * States are indexed by the cstate number,
86 * which is also the index into the MWAIT hint array. 97 * which is also the index into the MWAIT hint array.
87 * Thus C0 is a dummy. 98 * Thus C0 is a dummy.
@@ -122,7 +133,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
122 .driver_data = (void *) 0x00, 133 .driver_data = (void *) 0x00,
123 .flags = CPUIDLE_FLAG_TIME_VALID, 134 .flags = CPUIDLE_FLAG_TIME_VALID,
124 .exit_latency = 1, 135 .exit_latency = 1,
125 .target_residency = 4, 136 .target_residency = 1,
126 .enter = &intel_idle }, 137 .enter = &intel_idle },
127 { /* MWAIT C2 */ 138 { /* MWAIT C2 */
128 .name = "SNB-C3", 139 .name = "SNB-C3",
@@ -130,7 +141,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
130 .driver_data = (void *) 0x10, 141 .driver_data = (void *) 0x10,
131 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 142 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
132 .exit_latency = 80, 143 .exit_latency = 80,
133 .target_residency = 160, 144 .target_residency = 211,
134 .enter = &intel_idle }, 145 .enter = &intel_idle },
135 { /* MWAIT C3 */ 146 { /* MWAIT C3 */
136 .name = "SNB-C6", 147 .name = "SNB-C6",
@@ -138,7 +149,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
138 .driver_data = (void *) 0x20, 149 .driver_data = (void *) 0x20,
139 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 150 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
140 .exit_latency = 104, 151 .exit_latency = 104,
141 .target_residency = 208, 152 .target_residency = 345,
142 .enter = &intel_idle }, 153 .enter = &intel_idle },
143 { /* MWAIT C4 */ 154 { /* MWAIT C4 */
144 .name = "SNB-C7", 155 .name = "SNB-C7",
@@ -146,7 +157,7 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
146 .driver_data = (void *) 0x30, 157 .driver_data = (void *) 0x30,
147 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, 158 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
148 .exit_latency = 109, 159 .exit_latency = 109,
149 .target_residency = 300, 160 .target_residency = 345,
150 .enter = &intel_idle }, 161 .enter = &intel_idle },
151}; 162};
152 163
@@ -220,8 +231,6 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
220 kt_before = ktime_get_real(); 231 kt_before = ktime_get_real();
221 232
222 stop_critical_timings(); 233 stop_critical_timings();
223 trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
224 trace_cpu_idle((eax >> 4) + 1, cpu);
225 if (!need_resched()) { 234 if (!need_resched()) {
226 235
227 __monitor((void *)&current_thread_info()->flags, 0, 0); 236 __monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -243,6 +252,35 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
243 return usec_delta; 252 return usec_delta;
244} 253}
245 254
255static void __setup_broadcast_timer(void *arg)
256{
257 unsigned long reason = (unsigned long)arg;
258 int cpu = smp_processor_id();
259
260 reason = reason ?
261 CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
262
263 clockevents_notify(reason, &cpu);
264}
265
266static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
267 unsigned long action, void *hcpu)
268{
269 int hotcpu = (unsigned long)hcpu;
270
271 switch (action & 0xf) {
272 case CPU_ONLINE:
273 smp_call_function_single(hotcpu, __setup_broadcast_timer,
274 (void *)true, 1);
275 break;
276 }
277 return NOTIFY_OK;
278}
279
280static struct notifier_block setup_broadcast_notifier = {
281 .notifier_call = setup_broadcast_cpuhp_notify,
282};
283
246/* 284/*
247 * intel_idle_probe() 285 * intel_idle_probe()
248 */ 286 */
@@ -305,7 +343,11 @@ static int intel_idle_probe(void)
305 } 343 }
306 344
307 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ 345 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
308 lapic_timer_reliable_states = 0xFFFFFFFF; 346 lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
347 else {
348 smp_call_function(__setup_broadcast_timer, (void *)true, 1);
349 register_cpu_notifier(&setup_broadcast_notifier);
350 }
309 351
310 pr_debug(PREFIX "v" INTEL_IDLE_VERSION 352 pr_debug(PREFIX "v" INTEL_IDLE_VERSION
311 " model 0x%X\n", boot_cpu_data.x86_model); 353 " model 0x%X\n", boot_cpu_data.x86_model);
@@ -403,6 +445,10 @@ static int __init intel_idle_init(void)
403{ 445{
404 int retval; 446 int retval;
405 447
448 /* Do not load intel_idle at all for now if idle= is passed */
449 if (boot_option_idle_override != IDLE_NO_OVERRIDE)
450 return -ENODEV;
451
406 retval = intel_idle_probe(); 452 retval = intel_idle_probe();
407 if (retval) 453 if (retval)
408 return retval; 454 return retval;
@@ -428,6 +474,11 @@ static void __exit intel_idle_exit(void)
428 intel_idle_cpuidle_devices_uninit(); 474 intel_idle_cpuidle_devices_uninit();
429 cpuidle_unregister_driver(&intel_idle_driver); 475 cpuidle_unregister_driver(&intel_idle_driver);
430 476
477 if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
478 smp_call_function(__setup_broadcast_timer, (void *)false, 1);
479 unregister_cpu_notifier(&setup_broadcast_notifier);
480 }
481
431 return; 482 return;
432} 483}
433 484
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 68883565b725..f9ba7d74dfc0 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
308 INIT_WORK(&work->work, ib_cache_task); 308 INIT_WORK(&work->work, ib_cache_task);
309 work->device = event->device; 309 work->device = event->device;
310 work->port_num = event->element.port_num; 310 work->port_num = event->element.port_num;
311 schedule_work(&work->work); 311 queue_work(ib_wq, &work->work);
312 } 312 }
313 } 313 }
314} 314}
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
368 int p; 368 int p;
369 369
370 ib_unregister_event_handler(&device->cache.event_handler); 370 ib_unregister_event_handler(&device->cache.event_handler);
371 flush_scheduled_work(); 371 flush_workqueue(ib_wq);
372 372
373 for (p = 0; p <= end_port(device) - start_port(device); ++p) { 373 for (p = 0; p <= end_port(device) - start_port(device); ++p) {
374 kfree(device->cache.pkey_cache[p]); 374 kfree(device->cache.pkey_cache[p]);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a19effad0811..f793bf2f5da7 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,7 +38,6 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/workqueue.h>
42 41
43#include "core_priv.h" 42#include "core_priv.h"
44 43
@@ -52,6 +51,9 @@ struct ib_client_data {
52 void * data; 51 void * data;
53}; 52};
54 53
54struct workqueue_struct *ib_wq;
55EXPORT_SYMBOL_GPL(ib_wq);
56
55static LIST_HEAD(device_list); 57static LIST_HEAD(device_list);
56static LIST_HEAD(client_list); 58static LIST_HEAD(client_list);
57 59
@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
718{ 720{
719 int ret; 721 int ret;
720 722
723 ib_wq = alloc_workqueue("infiniband", 0, 0);
724 if (!ib_wq)
725 return -ENOMEM;
726
721 ret = ib_sysfs_setup(); 727 ret = ib_sysfs_setup();
722 if (ret) 728 if (ret)
723 printk(KERN_WARNING "Couldn't create InfiniBand device class\n"); 729 printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
726 if (ret) { 732 if (ret) {
727 printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n"); 733 printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
728 ib_sysfs_cleanup(); 734 ib_sysfs_cleanup();
735 destroy_workqueue(ib_wq);
729 } 736 }
730 737
731 return ret; 738 return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
736 ib_cache_cleanup(); 743 ib_cache_cleanup();
737 ib_sysfs_cleanup(); 744 ib_sysfs_cleanup();
738 /* Make sure that any pending umem accounting work is done. */ 745 /* Make sure that any pending umem accounting work is done. */
739 flush_scheduled_work(); 746 destroy_workqueue(ib_wq);
740} 747}
741 748
742module_init(ib_core_init); 749module_init(ib_core_init);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 91a660310b7c..fbbfa24cf572 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
425 port->sm_ah = NULL; 425 port->sm_ah = NULL;
426 spin_unlock_irqrestore(&port->ah_lock, flags); 426 spin_unlock_irqrestore(&port->ah_lock, flags);
427 427
428 schedule_work(&sa_dev->port[event->element.port_num - 428 queue_work(ib_wq, &sa_dev->port[event->element.port_num -
429 sa_dev->start_port].update_task); 429 sa_dev->start_port].update_task);
430 } 430 }
431} 431}
@@ -1079,7 +1079,7 @@ static void ib_sa_remove_one(struct ib_device *device)
1079 1079
1080 ib_unregister_event_handler(&sa_dev->event_handler); 1080 ib_unregister_event_handler(&sa_dev->event_handler);
1081 1081
1082 flush_scheduled_work(); 1082 flush_workqueue(ib_wq);
1083 1083
1084 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { 1084 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
1085 if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) { 1085 if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ca12acf38379..ec1e9da1488b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -636,6 +636,16 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
636 } 636 }
637} 637}
638 638
639static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
640 struct rdma_route *route)
641{
642 struct rdma_dev_addr *dev_addr;
643
644 dev_addr = &route->addr.dev_addr;
645 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
646 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
647}
648
639static ssize_t ucma_query_route(struct ucma_file *file, 649static ssize_t ucma_query_route(struct ucma_file *file,
640 const char __user *inbuf, 650 const char __user *inbuf,
641 int in_len, int out_len) 651 int in_len, int out_len)
@@ -670,8 +680,10 @@ static ssize_t ucma_query_route(struct ucma_file *file,
670 680
671 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; 681 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
672 resp.port_num = ctx->cm_id->port_num; 682 resp.port_num = ctx->cm_id->port_num;
673 if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) { 683 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
674 switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) { 684 case RDMA_TRANSPORT_IB:
685 switch (rdma_port_get_link_layer(ctx->cm_id->device,
686 ctx->cm_id->port_num)) {
675 case IB_LINK_LAYER_INFINIBAND: 687 case IB_LINK_LAYER_INFINIBAND:
676 ucma_copy_ib_route(&resp, &ctx->cm_id->route); 688 ucma_copy_ib_route(&resp, &ctx->cm_id->route);
677 break; 689 break;
@@ -681,6 +693,12 @@ static ssize_t ucma_query_route(struct ucma_file *file,
681 default: 693 default:
682 break; 694 break;
683 } 695 }
696 break;
697 case RDMA_TRANSPORT_IWARP:
698 ucma_copy_iw_route(&resp, &ctx->cm_id->route);
699 break;
700 default:
701 break;
684 } 702 }
685 703
686out: 704out:
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 415e186eee32..b645e558876f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
262 umem->mm = mm; 262 umem->mm = mm;
263 umem->diff = diff; 263 umem->diff = diff;
264 264
265 schedule_work(&umem->work); 265 queue_work(ib_wq, &umem->work);
266 return; 266 return;
267 } 267 }
268 } else 268 } else
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 85cfae4cad71..8c81992fa6db 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -459,13 +459,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
459 IB_DEVICE_MEM_WINDOW); 459 IB_DEVICE_MEM_WINDOW);
460 460
461 /* Allocate the qptr_array */ 461 /* Allocate the qptr_array */
462 c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *)); 462 c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
463 if (!c2dev->qptr_array) { 463 if (!c2dev->qptr_array) {
464 return -ENOMEM; 464 return -ENOMEM;
465 } 465 }
466 466
467 /* Inialize the qptr_array */ 467 /* Initialize the qptr_array */
468 memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
469 c2dev->qptr_array[0] = (void *) &c2dev->req_vq; 468 c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
470 c2dev->qptr_array[1] = (void *) &c2dev->rep_vq; 469 c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
471 c2dev->qptr_array[2] = (void *) &c2dev->aeq; 470 c2dev->qptr_array[2] = (void *) &c2dev->aeq;
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 9ce7819b7b2e..2ec716fb2edb 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -107,7 +107,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
107 r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL); 107 r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
108 if (r) { 108 if (r) {
109 init_waitqueue_head(&r->wait_object); 109 init_waitqueue_head(&r->wait_object);
110 r->reply_msg = (u64) NULL; 110 r->reply_msg = 0;
111 r->event = 0; 111 r->event = 0;
112 r->cm_id = NULL; 112 r->cm_id = NULL;
113 r->qp = NULL; 113 r->qp = NULL;
@@ -123,7 +123,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
123 */ 123 */
124void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r) 124void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
125{ 125{
126 r->reply_msg = (u64) NULL; 126 r->reply_msg = 0;
127 if (atomic_dec_and_test(&r->refcnt)) { 127 if (atomic_dec_and_test(&r->refcnt)) {
128 kfree(r); 128 kfree(r);
129 } 129 }
@@ -151,7 +151,7 @@ void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
151void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r) 151void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
152{ 152{
153 if (atomic_dec_and_test(&r->refcnt)) { 153 if (atomic_dec_and_test(&r->refcnt)) {
154 if (r->reply_msg != (u64) NULL) 154 if (r->reply_msg != 0)
155 vq_repbuf_free(c2dev, 155 vq_repbuf_free(c2dev,
156 (void *) (unsigned long) r->reply_msg); 156 (void *) (unsigned long) r->reply_msg);
157 kfree(r); 157 kfree(r);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0dc62b1438be..8b00e6c46f01 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -380,7 +380,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
380 16)) | FW_WR_FLOWID(ep->hwtid)); 380 16)) | FW_WR_FLOWID(ep->hwtid));
381 381
382 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 382 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
383 flowc->mnemval[0].val = cpu_to_be32(0); 383 flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
384 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 384 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
385 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 385 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
386 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 386 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 20800900ef3f..4f0be25cab1a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -220,7 +220,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
220 V_FW_RI_RES_WR_DCAEN(0) | 220 V_FW_RI_RES_WR_DCAEN(0) |
221 V_FW_RI_RES_WR_DCACPU(0) | 221 V_FW_RI_RES_WR_DCACPU(0) |
222 V_FW_RI_RES_WR_FBMIN(2) | 222 V_FW_RI_RES_WR_FBMIN(2) |
223 V_FW_RI_RES_WR_FBMAX(3) | 223 V_FW_RI_RES_WR_FBMAX(2) |
224 V_FW_RI_RES_WR_CIDXFTHRESHO(0) | 224 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
225 V_FW_RI_RES_WR_CIDXFTHRESH(0) | 225 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
226 V_FW_RI_RES_WR_EQSIZE(eqsize)); 226 V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -243,7 +243,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
243 V_FW_RI_RES_WR_DCAEN(0) | 243 V_FW_RI_RES_WR_DCAEN(0) |
244 V_FW_RI_RES_WR_DCACPU(0) | 244 V_FW_RI_RES_WR_DCACPU(0) |
245 V_FW_RI_RES_WR_FBMIN(2) | 245 V_FW_RI_RES_WR_FBMIN(2) |
246 V_FW_RI_RES_WR_FBMAX(3) | 246 V_FW_RI_RES_WR_FBMAX(2) |
247 V_FW_RI_RES_WR_CIDXFTHRESHO(0) | 247 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
248 V_FW_RI_RES_WR_CIDXFTHRESH(0) | 248 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
249 V_FW_RI_RES_WR_EQSIZE(eqsize)); 249 V_FW_RI_RES_WR_EQSIZE(eqsize));
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 1596e3085344..1898d6e7cce5 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -222,15 +222,14 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
222 queue->small_page = NULL; 222 queue->small_page = NULL;
223 223
224 /* allocate queue page pointers */ 224 /* allocate queue page pointers */
225 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); 225 queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
226 if (!queue->queue_pages) { 226 if (!queue->queue_pages) {
227 queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *)); 227 queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
228 if (!queue->queue_pages) { 228 if (!queue->queue_pages) {
229 ehca_gen_err("Couldn't allocate queue page list"); 229 ehca_gen_err("Couldn't allocate queue page list");
230 return 0; 230 return 0;
231 } 231 }
232 } 232 }
233 memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
234 233
235 /* allocate actual queue pages */ 234 /* allocate actual queue pages */
236 if (is_small) { 235 if (is_small) {
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index b33f0457a1ff..47db4bf34628 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -199,12 +199,11 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
199 goto bail; 199 goto bail;
200 } 200 }
201 201
202 dd = vmalloc(sizeof(*dd)); 202 dd = vzalloc(sizeof(*dd));
203 if (!dd) { 203 if (!dd) {
204 dd = ERR_PTR(-ENOMEM); 204 dd = ERR_PTR(-ENOMEM);
205 goto bail; 205 goto bail;
206 } 206 }
207 memset(dd, 0, sizeof(*dd));
208 dd->ipath_unit = -1; 207 dd->ipath_unit = -1;
209 208
210 spin_lock_irqsave(&ipath_devs_lock, flags); 209 spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -756,7 +755,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
756 */ 755 */
757 ipath_shutdown_device(dd); 756 ipath_shutdown_device(dd);
758 757
759 flush_scheduled_work(); 758 flush_workqueue(ib_wq);
760 759
761 if (dd->verbs_dev) 760 if (dd->verbs_dev)
762 ipath_unregister_ib_device(dd->verbs_dev); 761 ipath_unregister_ib_device(dd->verbs_dev);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 9292a15ad7c4..6d4b29c4cd89 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1530,7 +1530,7 @@ static int init_subports(struct ipath_devdata *dd,
1530 } 1530 }
1531 1531
1532 num_subports = uinfo->spu_subport_cnt; 1532 num_subports = uinfo->spu_subport_cnt;
1533 pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports); 1533 pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
1534 if (!pd->subport_uregbase) { 1534 if (!pd->subport_uregbase) {
1535 ret = -ENOMEM; 1535 ret = -ENOMEM;
1536 goto bail; 1536 goto bail;
@@ -1538,13 +1538,13 @@ static int init_subports(struct ipath_devdata *dd,
1538 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */ 1538 /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
1539 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * 1539 size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1540 sizeof(u32), PAGE_SIZE) * num_subports; 1540 sizeof(u32), PAGE_SIZE) * num_subports;
1541 pd->subport_rcvhdr_base = vmalloc(size); 1541 pd->subport_rcvhdr_base = vzalloc(size);
1542 if (!pd->subport_rcvhdr_base) { 1542 if (!pd->subport_rcvhdr_base) {
1543 ret = -ENOMEM; 1543 ret = -ENOMEM;
1544 goto bail_ureg; 1544 goto bail_ureg;
1545 } 1545 }
1546 1546
1547 pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks * 1547 pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
1548 pd->port_rcvegrbuf_size * 1548 pd->port_rcvegrbuf_size *
1549 num_subports); 1549 num_subports);
1550 if (!pd->subport_rcvegrbuf) { 1550 if (!pd->subport_rcvegrbuf) {
@@ -1556,11 +1556,6 @@ static int init_subports(struct ipath_devdata *dd,
1556 pd->port_subport_id = uinfo->spu_subport_id; 1556 pd->port_subport_id = uinfo->spu_subport_id;
1557 pd->active_slaves = 1; 1557 pd->active_slaves = 1;
1558 set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); 1558 set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
1559 memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
1560 memset(pd->subport_rcvhdr_base, 0, size);
1561 memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
1562 pd->port_rcvegrbuf_size *
1563 num_subports);
1564 goto bail; 1559 goto bail;
1565 1560
1566bail_rhdr: 1561bail_rhdr:
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 776938299e4c..fef0f4201257 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -442,7 +442,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
442 struct page **pages; 442 struct page **pages;
443 dma_addr_t *addrs; 443 dma_addr_t *addrs;
444 444
445 pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * 445 pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
446 sizeof(struct page *)); 446 sizeof(struct page *));
447 if (!pages) { 447 if (!pages) {
448 ipath_dev_err(dd, "failed to allocate shadow page * " 448 ipath_dev_err(dd, "failed to allocate shadow page * "
@@ -461,9 +461,6 @@ static void init_shadow_tids(struct ipath_devdata *dd)
461 return; 461 return;
462 } 462 }
463 463
464 memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
465 sizeof(struct page *));
466
467 dd->ipath_pageshadow = pages; 464 dd->ipath_pageshadow = pages;
468 dd->ipath_physshadow = addrs; 465 dd->ipath_physshadow = addrs;
469} 466}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 5e86d73eba2a..bab9f74c0665 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
220 work->mm = mm; 220 work->mm = mm;
221 work->num_pages = num_pages; 221 work->num_pages = num_pages;
222 222
223 schedule_work(&work->work); 223 queue_work(ib_wq, &work->work);
224 return; 224 return;
225 225
226bail_mm: 226bail_mm:
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4c85224aeaa7..c7a6213c6996 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -623,8 +623,9 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
623 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); 623 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
624 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 624 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
625 625
626 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags & 626 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
627 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)); 627 !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
628 MLX4_PROTOCOL_IB);
628 if (err) 629 if (err)
629 return err; 630 return err;
630 631
@@ -635,7 +636,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
635 return 0; 636 return 0;
636 637
637err_add: 638err_add:
638 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw); 639 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
639 return err; 640 return err;
640} 641}
641 642
@@ -665,7 +666,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
665 struct mlx4_ib_gid_entry *ge; 666 struct mlx4_ib_gid_entry *ge;
666 667
667 err = mlx4_multicast_detach(mdev->dev, 668 err = mlx4_multicast_detach(mdev->dev,
668 &mqp->mqp, gid->raw); 669 &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
669 if (err) 670 if (err)
670 return err; 671 return err;
671 672
@@ -1005,7 +1006,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1005 if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) 1006 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
1006 goto err_pd; 1007 goto err_pd;
1007 1008
1008 ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 1009 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
1010 PAGE_SIZE);
1009 if (!ibdev->uar_map) 1011 if (!ibdev->uar_map)
1010 goto err_uar; 1012 goto err_uar;
1011 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); 1013 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig
index 03efc074967e..da314c3fec23 100644
--- a/drivers/infiniband/hw/mthca/Kconfig
+++ b/drivers/infiniband/hw/mthca/Kconfig
@@ -7,7 +7,7 @@ config INFINIBAND_MTHCA
7 ("Tavor") and the MT25208 PCI Express HCA ("Arbel"). 7 ("Tavor") and the MT25208 PCI Express HCA ("Arbel").
8 8
9config INFINIBAND_MTHCA_DEBUG 9config INFINIBAND_MTHCA_DEBUG
10 bool "Verbose debugging output" if EMBEDDED 10 bool "Verbose debugging output" if EXPERT
11 depends on INFINIBAND_MTHCA 11 depends on INFINIBAND_MTHCA
12 default y 12 default y
13 ---help--- 13 ---help---
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 0aa0110e4b6c..e4a08c2819e4 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -146,7 +146,7 @@ static void poll_catas(unsigned long dev_ptr)
146 146
147void mthca_start_catas_poll(struct mthca_dev *dev) 147void mthca_start_catas_poll(struct mthca_dev *dev)
148{ 148{
149 unsigned long addr; 149 phys_addr_t addr;
150 150
151 init_timer(&dev->catas_err.timer); 151 init_timer(&dev->catas_err.timer);
152 dev->catas_err.map = NULL; 152 dev->catas_err.map = NULL;
@@ -158,7 +158,8 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
158 dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4); 158 dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
159 if (!dev->catas_err.map) { 159 if (!dev->catas_err.map) {
160 mthca_warn(dev, "couldn't map catastrophic error region " 160 mthca_warn(dev, "couldn't map catastrophic error region "
161 "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4); 161 "at 0x%llx/0x%x\n", (unsigned long long) addr,
162 dev->catas_err.size * 4);
162 return; 163 return;
163 } 164 }
164 165
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f4ceecd9684b..7bfa2a164955 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -713,7 +713,7 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
713 713
714static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base) 714static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
715{ 715{
716 unsigned long addr; 716 phys_addr_t addr;
717 u16 max_off = 0; 717 u16 max_off = 0;
718 int i; 718 int i;
719 719
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 8e8c728aff88..76785c653c13 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -653,7 +653,7 @@ static int mthca_map_reg(struct mthca_dev *dev,
653 unsigned long offset, unsigned long size, 653 unsigned long offset, unsigned long size,
654 void __iomem **map) 654 void __iomem **map)
655{ 655{
656 unsigned long base = pci_resource_start(dev->pdev, 0); 656 phys_addr_t base = pci_resource_start(dev->pdev, 0);
657 657
658 *map = ioremap(base + offset, size); 658 *map = ioremap(base + offset, size);
659 if (!*map) 659 if (!*map)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 5eee6665919a..8a40cd539ab1 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -790,7 +790,7 @@ static int mthca_setup_hca(struct mthca_dev *dev)
790 goto err_uar_table_free; 790 goto err_uar_table_free;
791 } 791 }
792 792
793 dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 793 dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
794 if (!dev->kar) { 794 if (!dev->kar) {
795 mthca_err(dev, "Couldn't map kernel access region, " 795 mthca_err(dev, "Couldn't map kernel access region, "
796 "aborting.\n"); 796 "aborting.\n");
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 065b20899876..44045c8846db 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -853,7 +853,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
853 853
854int mthca_init_mr_table(struct mthca_dev *dev) 854int mthca_init_mr_table(struct mthca_dev *dev)
855{ 855{
856 unsigned long addr; 856 phys_addr_t addr;
857 int mpts, mtts, err, i; 857 int mpts, mtts, err, i;
858 858
859 err = mthca_alloc_init(&dev->mr_table.mpt_alloc, 859 err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 0c9f0aa5d4ea..3b4ec3238ceb 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -144,6 +144,7 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
144 struct nes_device *nesdev; 144 struct nes_device *nesdev;
145 struct net_device *netdev; 145 struct net_device *netdev;
146 struct nes_vnic *nesvnic; 146 struct nes_vnic *nesvnic;
147 unsigned int is_bonded;
147 148
148 nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n", 149 nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n",
149 &ifa->ifa_address, &ifa->ifa_mask); 150 &ifa->ifa_address, &ifa->ifa_mask);
@@ -152,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
152 nesdev, nesdev->netdev[0]->name); 153 nesdev, nesdev->netdev[0]->name);
153 netdev = nesdev->netdev[0]; 154 netdev = nesdev->netdev[0];
154 nesvnic = netdev_priv(netdev); 155 nesvnic = netdev_priv(netdev);
155 if (netdev == event_netdev) { 156 is_bonded = (netdev->master == event_netdev);
157 if ((netdev == event_netdev) || is_bonded) {
156 if (nesvnic->rdma_enabled == 0) { 158 if (nesvnic->rdma_enabled == 0) {
157 nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since" 159 nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
158 " RDMA is not enabled.\n", 160 " RDMA is not enabled.\n",
@@ -169,7 +171,10 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
169 nes_manage_arp_cache(netdev, netdev->dev_addr, 171 nes_manage_arp_cache(netdev, netdev->dev_addr,
170 ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE); 172 ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
171 nesvnic->local_ipaddr = 0; 173 nesvnic->local_ipaddr = 0;
172 return NOTIFY_OK; 174 if (is_bonded)
175 continue;
176 else
177 return NOTIFY_OK;
173 break; 178 break;
174 case NETDEV_UP: 179 case NETDEV_UP:
175 nes_debug(NES_DBG_NETDEV, "event:UP\n"); 180 nes_debug(NES_DBG_NETDEV, "event:UP\n");
@@ -178,15 +183,24 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
178 nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n"); 183 nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
179 return NOTIFY_OK; 184 return NOTIFY_OK;
180 } 185 }
186 /* fall through */
187 case NETDEV_CHANGEADDR:
181 /* Add the address to the IP table */ 188 /* Add the address to the IP table */
182 nesvnic->local_ipaddr = ifa->ifa_address; 189 if (netdev->master)
190 nesvnic->local_ipaddr =
191 ((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address;
192 else
193 nesvnic->local_ipaddr = ifa->ifa_address;
183 194
184 nes_write_indexed(nesdev, 195 nes_write_indexed(nesdev,
185 NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 196 NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
186 ntohl(ifa->ifa_address)); 197 ntohl(nesvnic->local_ipaddr));
187 nes_manage_arp_cache(netdev, netdev->dev_addr, 198 nes_manage_arp_cache(netdev, netdev->dev_addr,
188 ntohl(nesvnic->local_ipaddr), NES_ARP_ADD); 199 ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
189 return NOTIFY_OK; 200 if (is_bonded)
201 continue;
202 else
203 return NOTIFY_OK;
190 break; 204 break;
191 default: 205 default:
192 break; 206 break;
@@ -660,6 +674,8 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
660 } 674 }
661 nes_notifiers_registered++; 675 nes_notifiers_registered++;
662 676
677 INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
678
663 /* Initialize network devices */ 679 /* Initialize network devices */
664 if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) 680 if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
665 goto bail7; 681 goto bail7;
@@ -742,6 +758,7 @@ static void __devexit nes_remove(struct pci_dev *pcidev)
742 struct nes_device *nesdev = pci_get_drvdata(pcidev); 758 struct nes_device *nesdev = pci_get_drvdata(pcidev);
743 struct net_device *netdev; 759 struct net_device *netdev;
744 int netdev_index = 0; 760 int netdev_index = 0;
761 unsigned long flags;
745 762
746 if (nesdev->netdev_count) { 763 if (nesdev->netdev_count) {
747 netdev = nesdev->netdev[netdev_index]; 764 netdev = nesdev->netdev[netdev_index];
@@ -768,6 +785,14 @@ static void __devexit nes_remove(struct pci_dev *pcidev)
768 free_irq(pcidev->irq, nesdev); 785 free_irq(pcidev->irq, nesdev);
769 tasklet_kill(&nesdev->dpc_tasklet); 786 tasklet_kill(&nesdev->dpc_tasklet);
770 787
788 spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
789 if (nesdev->link_recheck) {
790 spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
791 cancel_delayed_work_sync(&nesdev->work);
792 } else {
793 spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
794 }
795
771 /* Deallocate the Adapter Structure */ 796 /* Deallocate the Adapter Structure */
772 nes_destroy_adapter(nesdev->nesadapter); 797 nes_destroy_adapter(nesdev->nesadapter);
773 798
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index b3d145e82b4c..6fe79876009e 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -268,6 +268,9 @@ struct nes_device {
268 u8 napi_isr_ran; 268 u8 napi_isr_ran;
269 u8 disable_rx_flow_control; 269 u8 disable_rx_flow_control;
270 u8 disable_tx_flow_control; 270 u8 disable_tx_flow_control;
271
272 struct delayed_work work;
273 u8 link_recheck;
271}; 274};
272 275
273 276
@@ -507,6 +510,7 @@ void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
507void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *); 510void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
508int nes_destroy_cqp(struct nes_device *); 511int nes_destroy_cqp(struct nes_device *);
509int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); 512int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
513void nes_recheck_link_status(struct work_struct *work);
510 514
511/* nes_nic.c */ 515/* nes_nic.c */
512struct net_device *nes_netdev_init(struct nes_device *, void __iomem *); 516struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 25ad0f9944c0..009ec814d517 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1107,6 +1107,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1107 struct flowi fl; 1107 struct flowi fl;
1108 struct neighbour *neigh; 1108 struct neighbour *neigh;
1109 int rc = arpindex; 1109 int rc = arpindex;
1110 struct net_device *netdev;
1110 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; 1111 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
1111 1112
1112 memset(&fl, 0, sizeof fl); 1113 memset(&fl, 0, sizeof fl);
@@ -1117,7 +1118,12 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1117 return rc; 1118 return rc;
1118 } 1119 }
1119 1120
1120 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev); 1121 if (nesvnic->netdev->master)
1122 netdev = nesvnic->netdev->master;
1123 else
1124 netdev = nesvnic->netdev;
1125
1126 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
1121 if (neigh) { 1127 if (neigh) {
1122 if (neigh->nud_state & NUD_VALID) { 1128 if (neigh->nud_state & NUD_VALID) {
1123 nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X" 1129 nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 1980a461c499..8b606fd64022 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2608,6 +2608,13 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2608 netif_start_queue(nesvnic->netdev); 2608 netif_start_queue(nesvnic->netdev);
2609 nesvnic->linkup = 1; 2609 nesvnic->linkup = 1;
2610 netif_carrier_on(nesvnic->netdev); 2610 netif_carrier_on(nesvnic->netdev);
2611
2612 spin_lock(&nesvnic->port_ibevent_lock);
2613 if (nesdev->iw_status == 0) {
2614 nesdev->iw_status = 1;
2615 nes_port_ibevent(nesvnic);
2616 }
2617 spin_unlock(&nesvnic->port_ibevent_lock);
2611 } 2618 }
2612 } 2619 }
2613 } else { 2620 } else {
@@ -2633,9 +2640,23 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2633 netif_stop_queue(nesvnic->netdev); 2640 netif_stop_queue(nesvnic->netdev);
2634 nesvnic->linkup = 0; 2641 nesvnic->linkup = 0;
2635 netif_carrier_off(nesvnic->netdev); 2642 netif_carrier_off(nesvnic->netdev);
2643
2644 spin_lock(&nesvnic->port_ibevent_lock);
2645 if (nesdev->iw_status == 1) {
2646 nesdev->iw_status = 0;
2647 nes_port_ibevent(nesvnic);
2648 }
2649 spin_unlock(&nesvnic->port_ibevent_lock);
2636 } 2650 }
2637 } 2651 }
2638 } 2652 }
2653 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
2654 if (nesdev->link_recheck)
2655 cancel_delayed_work(&nesdev->work);
2656 nesdev->link_recheck = 1;
2657 schedule_delayed_work(&nesdev->work,
2658 NES_LINK_RECHECK_DELAY);
2659 }
2639 } 2660 }
2640 2661
2641 spin_unlock_irqrestore(&nesadapter->phy_lock, flags); 2662 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
@@ -2643,6 +2664,80 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2643 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE; 2664 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
2644} 2665}
2645 2666
2667void nes_recheck_link_status(struct work_struct *work)
2668{
2669 unsigned long flags;
2670 struct nes_device *nesdev = container_of(work, struct nes_device, work.work);
2671 struct nes_adapter *nesadapter = nesdev->nesadapter;
2672 struct nes_vnic *nesvnic;
2673 u32 mac_index = nesdev->mac_index;
2674 u16 phy_data;
2675 u16 temp_phy_data;
2676
2677 spin_lock_irqsave(&nesadapter->phy_lock, flags);
2678
2679 /* check link status */
2680 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
2681 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2682
2683 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
2684 nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2685 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021);
2686 phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2687
2688 phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
2689
2690 nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
2691 __func__, phy_data,
2692 nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
2693
2694 if (phy_data & 0x0004) {
2695 nesadapter->mac_link_down[mac_index] = 0;
2696 list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
2697 if (nesvnic->linkup == 0) {
2698 printk(PFX "The Link is now up for port %s, netdev %p.\n",
2699 nesvnic->netdev->name, nesvnic->netdev);
2700 if (netif_queue_stopped(nesvnic->netdev))
2701 netif_start_queue(nesvnic->netdev);
2702 nesvnic->linkup = 1;
2703 netif_carrier_on(nesvnic->netdev);
2704
2705 spin_lock(&nesvnic->port_ibevent_lock);
2706 if (nesdev->iw_status == 0) {
2707 nesdev->iw_status = 1;
2708 nes_port_ibevent(nesvnic);
2709 }
2710 spin_unlock(&nesvnic->port_ibevent_lock);
2711 }
2712 }
2713
2714 } else {
2715 nesadapter->mac_link_down[mac_index] = 1;
2716 list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
2717 if (nesvnic->linkup == 1) {
2718 printk(PFX "The Link is now down for port %s, netdev %p.\n",
2719 nesvnic->netdev->name, nesvnic->netdev);
2720 if (!(netif_queue_stopped(nesvnic->netdev)))
2721 netif_stop_queue(nesvnic->netdev);
2722 nesvnic->linkup = 0;
2723 netif_carrier_off(nesvnic->netdev);
2724
2725 spin_lock(&nesvnic->port_ibevent_lock);
2726 if (nesdev->iw_status == 1) {
2727 nesdev->iw_status = 0;
2728 nes_port_ibevent(nesvnic);
2729 }
2730 spin_unlock(&nesvnic->port_ibevent_lock);
2731 }
2732 }
2733 }
2734 if (nesdev->link_recheck++ < NES_LINK_RECHECK_MAX)
2735 schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
2736 else
2737 nesdev->link_recheck = 0;
2738
2739 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2740}
2646 2741
2647 2742
2648static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) 2743static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 1204c3432b63..d2abe07133a5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1193,6 +1193,8 @@ struct nes_listener {
1193 1193
1194struct nes_ib_device; 1194struct nes_ib_device;
1195 1195
1196#define NES_EVENT_DELAY msecs_to_jiffies(100)
1197
1196struct nes_vnic { 1198struct nes_vnic {
1197 struct nes_ib_device *nesibdev; 1199 struct nes_ib_device *nesibdev;
1198 u64 sq_full; 1200 u64 sq_full;
@@ -1247,6 +1249,10 @@ struct nes_vnic {
1247 u32 lro_max_aggr; 1249 u32 lro_max_aggr;
1248 struct net_lro_mgr lro_mgr; 1250 struct net_lro_mgr lro_mgr;
1249 struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS]; 1251 struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
1252 struct timer_list event_timer;
1253 enum ib_event_type delayed_event;
1254 enum ib_event_type last_dispatched_event;
1255 spinlock_t port_ibevent_lock;
1250}; 1256};
1251 1257
1252struct nes_ib_device { 1258struct nes_ib_device {
@@ -1348,6 +1354,10 @@ struct nes_terminate_hdr {
1348#define BAD_FRAME_OFFSET 64 1354#define BAD_FRAME_OFFSET 64
1349#define CQE_MAJOR_DRV 0x8000 1355#define CQE_MAJOR_DRV 0x8000
1350 1356
1357/* Used for link status recheck after interrupt processing */
1358#define NES_LINK_RECHECK_DELAY msecs_to_jiffies(50)
1359#define NES_LINK_RECHECK_MAX 60
1360
1351#define nes_vlan_rx vlan_hwaccel_receive_skb 1361#define nes_vlan_rx vlan_hwaccel_receive_skb
1352#define nes_netif_rx netif_receive_skb 1362#define nes_netif_rx netif_receive_skb
1353 1363
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 5a4c36484722..2c9c1933bbe3 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -144,6 +144,7 @@ static int nes_netdev_open(struct net_device *netdev)
144 u32 nic_active_bit; 144 u32 nic_active_bit;
145 u32 nic_active; 145 u32 nic_active;
146 struct list_head *list_pos, *list_temp; 146 struct list_head *list_pos, *list_temp;
147 unsigned long flags;
147 148
148 assert(nesdev != NULL); 149 assert(nesdev != NULL);
149 150
@@ -233,18 +234,36 @@ static int nes_netdev_open(struct net_device *netdev)
233 first_nesvnic = nesvnic; 234 first_nesvnic = nesvnic;
234 } 235 }
235 236
236 if (nesvnic->of_device_registered) {
237 nesdev->iw_status = 1;
238 nesdev->nesadapter->send_term_ok = 1;
239 nes_port_ibevent(nesvnic);
240 }
241
242 if (first_nesvnic->linkup) { 237 if (first_nesvnic->linkup) {
243 /* Enable network packets */ 238 /* Enable network packets */
244 nesvnic->linkup = 1; 239 nesvnic->linkup = 1;
245 netif_start_queue(netdev); 240 netif_start_queue(netdev);
246 netif_carrier_on(netdev); 241 netif_carrier_on(netdev);
247 } 242 }
243
244 spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
245 if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
246 if (nesdev->link_recheck)
247 cancel_delayed_work(&nesdev->work);
248 nesdev->link_recheck = 1;
249 schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
250 }
251 spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
252
253 spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
254 if (nesvnic->of_device_registered) {
255 nesdev->nesadapter->send_term_ok = 1;
256 if (nesvnic->linkup == 1) {
257 if (nesdev->iw_status == 0) {
258 nesdev->iw_status = 1;
259 nes_port_ibevent(nesvnic);
260 }
261 } else {
262 nesdev->iw_status = 0;
263 }
264 }
265 spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
266
248 napi_enable(&nesvnic->napi); 267 napi_enable(&nesvnic->napi);
249 nesvnic->netdev_open = 1; 268 nesvnic->netdev_open = 1;
250 269
@@ -263,6 +282,7 @@ static int nes_netdev_stop(struct net_device *netdev)
263 u32 nic_active; 282 u32 nic_active;
264 struct nes_vnic *first_nesvnic = NULL; 283 struct nes_vnic *first_nesvnic = NULL;
265 struct list_head *list_pos, *list_temp; 284 struct list_head *list_pos, *list_temp;
285 unsigned long flags;
266 286
267 nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n", 287 nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
268 nesvnic, nesdev, netdev, netdev->name); 288 nesvnic, nesdev, netdev, netdev->name);
@@ -315,12 +335,17 @@ static int nes_netdev_stop(struct net_device *netdev)
315 nic_active &= nic_active_mask; 335 nic_active &= nic_active_mask;
316 nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active); 336 nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
317 337
318 338 spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
319 if (nesvnic->of_device_registered) { 339 if (nesvnic->of_device_registered) {
320 nesdev->nesadapter->send_term_ok = 0; 340 nesdev->nesadapter->send_term_ok = 0;
321 nesdev->iw_status = 0; 341 nesdev->iw_status = 0;
322 nes_port_ibevent(nesvnic); 342 if (nesvnic->linkup == 1)
343 nes_port_ibevent(nesvnic);
323 } 344 }
345 del_timer_sync(&nesvnic->event_timer);
346 nesvnic->event_timer.function = NULL;
347 spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
348
324 nes_destroy_nic_qp(nesvnic); 349 nes_destroy_nic_qp(nesvnic);
325 350
326 nesvnic->netdev_open = 0; 351 nesvnic->netdev_open = 0;
@@ -1750,7 +1775,10 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1750 nesvnic->rdma_enabled = 0; 1775 nesvnic->rdma_enabled = 0;
1751 } 1776 }
1752 nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id; 1777 nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
1778 init_timer(&nesvnic->event_timer);
1779 nesvnic->event_timer.function = NULL;
1753 spin_lock_init(&nesvnic->tx_lock); 1780 spin_lock_init(&nesvnic->tx_lock);
1781 spin_lock_init(&nesvnic->port_ibevent_lock);
1754 nesdev->netdev[nesdev->netdev_count] = netdev; 1782 nesdev->netdev[nesdev->netdev_count] = netdev;
1755 1783
1756 nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n", 1784 nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
@@ -1763,8 +1791,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1763 (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) || 1791 (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
1764 ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) { 1792 ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
1765 u32 u32temp; 1793 u32 u32temp;
1766 u32 link_mask; 1794 u32 link_mask = 0;
1767 u32 link_val; 1795 u32 link_val = 0;
1796 u16 temp_phy_data;
1797 u16 phy_data = 0;
1798 unsigned long flags;
1768 1799
1769 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1800 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1770 (0x200 * (nesdev->mac_index & 1))); 1801 (0x200 * (nesdev->mac_index & 1)));
@@ -1786,6 +1817,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1786 link_val = 0x02020000; 1817 link_val = 0x02020000;
1787 } 1818 }
1788 break; 1819 break;
1820 case NES_PHY_TYPE_SFP_D:
1821 spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
1822 nes_read_10G_phy_reg(nesdev,
1823 nesdev->nesadapter->phy_index[nesdev->mac_index],
1824 1, 0x9003);
1825 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1826 nes_read_10G_phy_reg(nesdev,
1827 nesdev->nesadapter->phy_index[nesdev->mac_index],
1828 3, 0x0021);
1829 nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1830 nes_read_10G_phy_reg(nesdev,
1831 nesdev->nesadapter->phy_index[nesdev->mac_index],
1832 3, 0x0021);
1833 phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1834 spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
1835 phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
1836 break;
1789 default: 1837 default:
1790 link_mask = 0x0f1f0000; 1838 link_mask = 0x0f1f0000;
1791 link_val = 0x0f0f0000; 1839 link_val = 0x0f0f0000;
@@ -1795,8 +1843,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1795 u32temp = nes_read_indexed(nesdev, 1843 u32temp = nes_read_indexed(nesdev,
1796 NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1844 NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1797 (0x200 * (nesdev->mac_index & 1))); 1845 (0x200 * (nesdev->mac_index & 1)));
1798 if ((u32temp & link_mask) == link_val) 1846
1799 nesvnic->linkup = 1; 1847 if (phy_type == NES_PHY_TYPE_SFP_D) {
1848 if (phy_data & 0x0004)
1849 nesvnic->linkup = 1;
1850 } else {
1851 if ((u32temp & link_mask) == link_val)
1852 nesvnic->linkup = 1;
1853 }
1800 1854
1801 /* clear the MAC interrupt status, assumes direct logical to physical mapping */ 1855 /* clear the MAC interrupt status, assumes direct logical to physical mapping */
1802 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index)); 1856 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 99933e4e48ff..26d8018c0a7c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3936,6 +3936,30 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
3936 return nesibdev; 3936 return nesibdev;
3937} 3937}
3938 3938
3939
3940/**
3941 * nes_handle_delayed_event
3942 */
3943static void nes_handle_delayed_event(unsigned long data)
3944{
3945 struct nes_vnic *nesvnic = (void *) data;
3946
3947 if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
3948 struct ib_event event;
3949
3950 event.device = &nesvnic->nesibdev->ibdev;
3951 if (!event.device)
3952 goto stop_timer;
3953 event.event = nesvnic->delayed_event;
3954 event.element.port_num = nesvnic->logical_port + 1;
3955 ib_dispatch_event(&event);
3956 }
3957
3958stop_timer:
3959 nesvnic->event_timer.function = NULL;
3960}
3961
3962
3939void nes_port_ibevent(struct nes_vnic *nesvnic) 3963void nes_port_ibevent(struct nes_vnic *nesvnic)
3940{ 3964{
3941 struct nes_ib_device *nesibdev = nesvnic->nesibdev; 3965 struct nes_ib_device *nesibdev = nesvnic->nesibdev;
@@ -3944,7 +3968,18 @@ void nes_port_ibevent(struct nes_vnic *nesvnic)
3944 event.device = &nesibdev->ibdev; 3968 event.device = &nesibdev->ibdev;
3945 event.element.port_num = nesvnic->logical_port + 1; 3969 event.element.port_num = nesvnic->logical_port + 1;
3946 event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 3970 event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3947 ib_dispatch_event(&event); 3971
3972 if (!nesvnic->event_timer.function) {
3973 ib_dispatch_event(&event);
3974 nesvnic->last_dispatched_event = event.event;
3975 nesvnic->event_timer.function = nes_handle_delayed_event;
3976 nesvnic->event_timer.data = (unsigned long) nesvnic;
3977 nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
3978 add_timer(&nesvnic->event_timer);
3979 } else {
3980 mod_timer(&nesvnic->event_timer, jiffies + NES_EVENT_DELAY);
3981 }
3982 nesvnic->delayed_event = event.event;
3948} 3983}
3949 3984
3950 3985
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 127a0d5069f0..de799f17cb9e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
1692 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; 1692 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
1693 spin_unlock_irqrestore(&ppd->lflags_lock, flags); 1693 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1694 wake_up(&ppd->cpspec->autoneg_wait); 1694 wake_up(&ppd->cpspec->autoneg_wait);
1695 cancel_delayed_work(&ppd->cpspec->autoneg_work); 1695 cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
1696 flush_scheduled_work();
1697 1696
1698 shutdown_7220_relock_poll(ppd->dd); 1697 shutdown_7220_relock_poll(ppd->dd);
1699 val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg); 1698 val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
3515 3514
3516 toggle_7220_rclkrls(ppd->dd); 3515 toggle_7220_rclkrls(ppd->dd);
3517 /* 2 msec is minimum length of a poll cycle */ 3516 /* 2 msec is minimum length of a poll cycle */
3518 schedule_delayed_work(&ppd->cpspec->autoneg_work, 3517 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
3519 msecs_to_jiffies(2)); 3518 msecs_to_jiffies(2));
3520} 3519}
3521 3520
3522/* 3521/*
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index abd409d592ef..b01809a82cb0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -623,7 +623,6 @@ struct qib_chippport_specific {
623 u8 ibmalfusesnap; 623 u8 ibmalfusesnap;
624 struct qib_qsfp_data qsfp_data; 624 struct qib_qsfp_data qsfp_data;
625 char epmsgbuf[192]; /* for port error interrupt msg buffer */ 625 char epmsgbuf[192]; /* for port error interrupt msg buffer */
626 u8 bounced;
627}; 626};
628 627
629static struct { 628static struct {
@@ -1881,23 +1880,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1881 IB_PHYSPORTSTATE_DISABLED) 1880 IB_PHYSPORTSTATE_DISABLED)
1882 qib_set_ib_7322_lstate(ppd, 0, 1881 qib_set_ib_7322_lstate(ppd, 0,
1883 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); 1882 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1884 else { 1883 else
1885 u32 lstate;
1886 /*
1887 * We need the current logical link state before
1888 * lflags are set in handle_e_ibstatuschanged.
1889 */
1890 lstate = qib_7322_iblink_state(ibcs);
1891
1892 if (IS_QMH(dd) && !ppd->cpspec->bounced &&
1893 ltstate == IB_PHYSPORTSTATE_LINKUP &&
1894 (lstate >= IB_PORT_INIT &&
1895 lstate <= IB_PORT_ACTIVE)) {
1896 ppd->cpspec->bounced = 1;
1897 qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
1898 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
1899 }
1900
1901 /* 1884 /*
1902 * Since going into a recovery state causes the link 1885 * Since going into a recovery state causes the link
1903 * state to go down and since recovery is transitory, 1886 * state to go down and since recovery is transitory,
@@ -1911,7 +1894,6 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1911 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && 1894 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1912 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) 1895 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1913 qib_handle_e_ibstatuschanged(ppd, ibcs); 1896 qib_handle_e_ibstatuschanged(ppd, ibcs);
1914 }
1915 } 1897 }
1916 if (*msg && iserr) 1898 if (*msg && iserr)
1917 qib_dev_porterr(dd, ppd->port, "%s error\n", msg); 1899 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2381,6 +2363,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2381 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); 2363 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2382 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); 2364 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2383 2365
2366 /* Hold the link state machine for mezz boards */
2367 if (IS_QMH(dd) || IS_QME(dd))
2368 qib_set_ib_7322_lstate(ppd, 0,
2369 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2370
2384 /* Also enable IBSTATUSCHG interrupt. */ 2371 /* Also enable IBSTATUSCHG interrupt. */
2385 val = qib_read_kreg_port(ppd, krp_errmask); 2372 val = qib_read_kreg_port(ppd, krp_errmask);
2386 qib_write_kreg_port(ppd, krp_errmask, 2373 qib_write_kreg_port(ppd, krp_errmask,
@@ -2406,10 +2393,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2406 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; 2393 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2407 spin_unlock_irqrestore(&ppd->lflags_lock, flags); 2394 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2408 wake_up(&ppd->cpspec->autoneg_wait); 2395 wake_up(&ppd->cpspec->autoneg_wait);
2409 cancel_delayed_work(&ppd->cpspec->autoneg_work); 2396 cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2410 if (ppd->dd->cspec->r1) 2397 if (ppd->dd->cspec->r1)
2411 cancel_delayed_work(&ppd->cpspec->ipg_work); 2398 cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2412 flush_scheduled_work();
2413 2399
2414 ppd->cpspec->chase_end = 0; 2400 ppd->cpspec->chase_end = 0;
2415 if (ppd->cpspec->chase_timer.data) /* if initted */ 2401 if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2706,7 +2692,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2706 if (!(pins & mask)) { 2692 if (!(pins & mask)) {
2707 ++handled; 2693 ++handled;
2708 qd->t_insert = get_jiffies_64(); 2694 qd->t_insert = get_jiffies_64();
2709 schedule_work(&qd->work); 2695 queue_work(ib_wq, &qd->work);
2710 } 2696 }
2711 } 2697 }
2712 } 2698 }
@@ -4990,8 +4976,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
4990 set_7322_ibspeed_fast(ppd, QIB_IB_DDR); 4976 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
4991 qib_7322_mini_pcs_reset(ppd); 4977 qib_7322_mini_pcs_reset(ppd);
4992 /* 2 msec is minimum length of a poll cycle */ 4978 /* 2 msec is minimum length of a poll cycle */
4993 schedule_delayed_work(&ppd->cpspec->autoneg_work, 4979 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
4994 msecs_to_jiffies(2)); 4980 msecs_to_jiffies(2));
4995} 4981}
4996 4982
4997/* 4983/*
@@ -5121,7 +5107,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
5121 ib_free_send_mad(send_buf); 5107 ib_free_send_mad(send_buf);
5122retry: 5108retry:
5123 delay = 2 << ppd->cpspec->ipg_tries; 5109 delay = 2 << ppd->cpspec->ipg_tries;
5124 schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay)); 5110 queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5111 msecs_to_jiffies(delay));
5125} 5112}
5126 5113
5127/* 5114/*
@@ -5702,6 +5689,11 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5702 ppd->cpspec->h1_val = h1; 5689 ppd->cpspec->h1_val = h1;
5703 /* now change the IBC and serdes, overriding generic */ 5690 /* now change the IBC and serdes, overriding generic */
5704 init_txdds_table(ppd, 1); 5691 init_txdds_table(ppd, 1);
5692 /* Re-enable the physical state machine on mezz boards
5693 * now that the correct settings have been set. */
5694 if (IS_QMH(dd) || IS_QME(dd))
5695 qib_set_ib_7322_lstate(ppd, 0,
5696 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5705 any++; 5697 any++;
5706 } 5698 }
5707 if (*nxt == '\n') 5699 if (*nxt == '\n')
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 7896afbb9ce8..ffefb78b8949 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
80module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); 80module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
81MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); 81MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
82 82
83struct workqueue_struct *qib_wq;
84struct workqueue_struct *qib_cq_wq; 83struct workqueue_struct *qib_cq_wq;
85 84
86static void verify_interrupt(unsigned long); 85static void verify_interrupt(unsigned long);
@@ -270,23 +269,20 @@ static void init_shadow_tids(struct qib_devdata *dd)
270 struct page **pages; 269 struct page **pages;
271 dma_addr_t *addrs; 270 dma_addr_t *addrs;
272 271
273 pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); 272 pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
274 if (!pages) { 273 if (!pages) {
275 qib_dev_err(dd, "failed to allocate shadow page * " 274 qib_dev_err(dd, "failed to allocate shadow page * "
276 "array, no expected sends!\n"); 275 "array, no expected sends!\n");
277 goto bail; 276 goto bail;
278 } 277 }
279 278
280 addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); 279 addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
281 if (!addrs) { 280 if (!addrs) {
282 qib_dev_err(dd, "failed to allocate shadow dma handle " 281 qib_dev_err(dd, "failed to allocate shadow dma handle "
283 "array, no expected sends!\n"); 282 "array, no expected sends!\n");
284 goto bail_free; 283 goto bail_free;
285 } 284 }
286 285
287 memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
288 memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
289
290 dd->pageshadow = pages; 286 dd->pageshadow = pages;
291 dd->physshadow = addrs; 287 dd->physshadow = addrs;
292 return; 288 return;
@@ -1047,24 +1043,10 @@ static int __init qlogic_ib_init(void)
1047 if (ret) 1043 if (ret)
1048 goto bail; 1044 goto bail;
1049 1045
1050 /*
1051 * We create our own workqueue mainly because we want to be
1052 * able to flush it when devices are being removed. We can't
1053 * use schedule_work()/flush_scheduled_work() because both
1054 * unregister_netdev() and linkwatch_event take the rtnl lock,
1055 * so flush_scheduled_work() can deadlock during device
1056 * removal.
1057 */
1058 qib_wq = create_workqueue("qib");
1059 if (!qib_wq) {
1060 ret = -ENOMEM;
1061 goto bail_dev;
1062 }
1063
1064 qib_cq_wq = create_singlethread_workqueue("qib_cq"); 1046 qib_cq_wq = create_singlethread_workqueue("qib_cq");
1065 if (!qib_cq_wq) { 1047 if (!qib_cq_wq) {
1066 ret = -ENOMEM; 1048 ret = -ENOMEM;
1067 goto bail_wq; 1049 goto bail_dev;
1068 } 1050 }
1069 1051
1070 /* 1052 /*
@@ -1094,8 +1076,6 @@ bail_unit:
1094 idr_destroy(&qib_unit_table); 1076 idr_destroy(&qib_unit_table);
1095bail_cq_wq: 1077bail_cq_wq:
1096 destroy_workqueue(qib_cq_wq); 1078 destroy_workqueue(qib_cq_wq);
1097bail_wq:
1098 destroy_workqueue(qib_wq);
1099bail_dev: 1079bail_dev:
1100 qib_dev_cleanup(); 1080 qib_dev_cleanup();
1101bail: 1081bail:
@@ -1119,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
1119 1099
1120 pci_unregister_driver(&qib_driver); 1100 pci_unregister_driver(&qib_driver);
1121 1101
1122 destroy_workqueue(qib_wq);
1123 destroy_workqueue(qib_cq_wq); 1102 destroy_workqueue(qib_cq_wq);
1124 1103
1125 qib_cpulist_count = 0; 1104 qib_cpulist_count = 0;
@@ -1292,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
1292 1271
1293 if (qib_mini_init || initfail || ret) { 1272 if (qib_mini_init || initfail || ret) {
1294 qib_stop_timers(dd); 1273 qib_stop_timers(dd);
1295 flush_scheduled_work(); 1274 flush_workqueue(ib_wq);
1296 for (pidx = 0; pidx < dd->num_pports; ++pidx) 1275 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1297 dd->f_quiet_serdes(dd->pport + pidx); 1276 dd->f_quiet_serdes(dd->pport + pidx);
1298 if (qib_mini_init) 1277 if (qib_mini_init)
@@ -1341,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
1341 1320
1342 qib_stop_timers(dd); 1321 qib_stop_timers(dd);
1343 1322
1344 /* wait until all of our (qsfp) schedule_work() calls complete */ 1323 /* wait until all of our (qsfp) queue_work() calls complete */
1345 flush_scheduled_work(); 1324 flush_workqueue(ib_wq);
1346 1325
1347 ret = qibfs_remove(dd); 1326 ret = qibfs_remove(dd);
1348 if (ret) 1327 if (ret)
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 35b3604b691d..3374a52232c1 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
485 goto bail; 485 goto bail;
486 /* We see a module, but it may be unwise to look yet. Just schedule */ 486 /* We see a module, but it may be unwise to look yet. Just schedule */
487 qd->t_insert = get_jiffies_64(); 487 qd->t_insert = get_jiffies_64();
488 schedule_work(&qd->work); 488 queue_work(ib_wq, &qd->work);
489bail: 489bail:
490 return; 490 return;
491} 491}
@@ -493,10 +493,9 @@ bail:
493void qib_qsfp_deinit(struct qib_qsfp_data *qd) 493void qib_qsfp_deinit(struct qib_qsfp_data *qd)
494{ 494{
495 /* 495 /*
496 * There is nothing to do here for now. our 496 * There is nothing to do here for now. our work is scheduled
497 * work is scheduled with schedule_work(), and 497 * with queue_work(), and flush_workqueue() from remove_one
498 * flush_scheduled_work() from remove_one will 498 * will block until all work setup with queue_work()
499 * block until all work ssetup with schedule_work()
500 * completes. 499 * completes.
501 */ 500 */
502} 501}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 63b22a9a7feb..95e5b47223b3 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
805 !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); 805 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
806} 806}
807 807
808extern struct workqueue_struct *qib_wq;
809extern struct workqueue_struct *qib_cq_wq; 808extern struct workqueue_struct *qib_cq_wq;
810 809
811/* 810/*
@@ -814,7 +813,7 @@ extern struct workqueue_struct *qib_cq_wq;
814static inline void qib_schedule_send(struct qib_qp *qp) 813static inline void qib_schedule_send(struct qib_qp *qp)
815{ 814{
816 if (qib_send_ok(qp)) 815 if (qib_send_ok(qp))
817 queue_work(qib_wq, &qp->s_work); 816 queue_work(ib_wq, &qp->s_work);
818} 817}
819 818
820static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) 819static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 55855eeabae7..cda8eac55fff 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -24,7 +24,7 @@ config INFINIBAND_IPOIB_CM
24 unless you limit mtu for these destinations to 2044. 24 unless you limit mtu for these destinations to 2044.
25 25
26config INFINIBAND_IPOIB_DEBUG 26config INFINIBAND_IPOIB_DEBUG
27 bool "IP-over-InfiniBand debugging" if EMBEDDED 27 bool "IP-over-InfiniBand debugging" if EXPERT
28 depends on INFINIBAND_IPOIB 28 depends on INFINIBAND_IPOIB
29 default y 29 default y
30 ---help--- 30 ---help---
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c1c49f2d35b5..93d55806b967 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -352,15 +352,13 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
352 int ret; 352 int ret;
353 int i; 353 int i;
354 354
355 rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring); 355 rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
356 if (!rx->rx_ring) { 356 if (!rx->rx_ring) {
357 printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n", 357 printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
358 priv->ca->name, ipoib_recvq_size); 358 priv->ca->name, ipoib_recvq_size);
359 return -ENOMEM; 359 return -ENOMEM;
360 } 360 }
361 361
362 memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
363
364 t = kmalloc(sizeof *t, GFP_KERNEL); 362 t = kmalloc(sizeof *t, GFP_KERNEL);
365 if (!t) { 363 if (!t) {
366 ret = -ENOMEM; 364 ret = -ENOMEM;
@@ -1097,13 +1095,12 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1097 struct ipoib_dev_priv *priv = netdev_priv(p->dev); 1095 struct ipoib_dev_priv *priv = netdev_priv(p->dev);
1098 int ret; 1096 int ret;
1099 1097
1100 p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring); 1098 p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
1101 if (!p->tx_ring) { 1099 if (!p->tx_ring) {
1102 ipoib_warn(priv, "failed to allocate tx ring\n"); 1100 ipoib_warn(priv, "failed to allocate tx ring\n");
1103 ret = -ENOMEM; 1101 ret = -ENOMEM;
1104 goto err_tx; 1102 goto err_tx;
1105 } 1103 }
1106 memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
1107 1104
1108 p->qp = ipoib_cm_create_tx_qp(p->dev, p); 1105 p->qp = ipoib_cm_create_tx_qp(p->dev, p);
1109 if (IS_ERR(p->qp)) { 1106 if (IS_ERR(p->qp)) {
@@ -1521,7 +1518,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
1521 return; 1518 return;
1522 } 1519 }
1523 1520
1524 priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring); 1521 priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1525 if (!priv->cm.srq_ring) { 1522 if (!priv->cm.srq_ring) {
1526 printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n", 1523 printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
1527 priv->ca->name, ipoib_recvq_size); 1524 priv->ca->name, ipoib_recvq_size);
@@ -1530,7 +1527,6 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
1530 return; 1527 return;
1531 } 1528 }
1532 1529
1533 memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1534} 1530}
1535 1531
1536int ipoib_cm_dev_init(struct net_device *dev) 1532int ipoib_cm_dev_init(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7a07a728fe0d..aca3b44f7aed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -916,13 +916,12 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
916 goto out; 916 goto out;
917 } 917 }
918 918
919 priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring); 919 priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
920 if (!priv->tx_ring) { 920 if (!priv->tx_ring) {
921 printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n", 921 printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
922 ca->name, ipoib_sendq_size); 922 ca->name, ipoib_sendq_size);
923 goto out_rx_ring_cleanup; 923 goto out_rx_ring_cleanup;
924 } 924 }
925 memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
926 925
927 /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ 926 /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
928 927
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4b62105ed1e8..83664ed2804f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -638,7 +638,7 @@ err:
638 if (target->state == SRP_TARGET_CONNECTING) { 638 if (target->state == SRP_TARGET_CONNECTING) {
639 target->state = SRP_TARGET_DEAD; 639 target->state = SRP_TARGET_DEAD;
640 INIT_WORK(&target->work, srp_remove_work); 640 INIT_WORK(&target->work, srp_remove_work);
641 schedule_work(&target->work); 641 queue_work(ib_wq, &target->work);
642 } 642 }
643 spin_unlock_irq(&target->lock); 643 spin_unlock_irq(&target->lock);
644 644
@@ -1132,15 +1132,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1132 1132
1133 spin_lock_irqsave(&target->lock, flags); 1133 spin_lock_irqsave(&target->lock, flags);
1134 iu = __srp_get_tx_iu(target, SRP_IU_CMD); 1134 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
1135 if (iu) {
1136 req = list_first_entry(&target->free_reqs, struct srp_request,
1137 list);
1138 list_del(&req->list);
1139 }
1140 spin_unlock_irqrestore(&target->lock, flags);
1141
1142 if (!iu) 1135 if (!iu)
1143 goto err; 1136 goto err_unlock;
1137
1138 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1139 list_del(&req->list);
1140 spin_unlock_irqrestore(&target->lock, flags);
1144 1141
1145 dev = target->srp_host->srp_dev->dev; 1142 dev = target->srp_host->srp_dev->dev;
1146 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, 1143 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
@@ -1185,6 +1182,8 @@ err_iu:
1185 1182
1186 spin_lock_irqsave(&target->lock, flags); 1183 spin_lock_irqsave(&target->lock, flags);
1187 list_add(&req->list, &target->free_reqs); 1184 list_add(&req->list, &target->free_reqs);
1185
1186err_unlock:
1188 spin_unlock_irqrestore(&target->lock, flags); 1187 spin_unlock_irqrestore(&target->lock, flags);
1189 1188
1190err: 1189err:
@@ -2199,7 +2198,7 @@ static void srp_remove_one(struct ib_device *device)
2199 * started before we marked our target ports as 2198 * started before we marked our target ports as
2200 * removed, and any target port removal tasks. 2199 * removed, and any target port removal tasks.
2201 */ 2200 */
2202 flush_scheduled_work(); 2201 flush_workqueue(ib_wq);
2203 2202
2204 list_for_each_entry_safe(target, tmp_target, 2203 list_for_each_entry_safe(target, tmp_target,
2205 &host->target_list, list) { 2204 &host->target_list, list) {
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 07c2cd43109c..1903c0f5b925 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -6,7 +6,7 @@ menu "Input device support"
6 depends on !S390 6 depends on !S390
7 7
8config INPUT 8config INPUT
9 tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED 9 tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
10 default y 10 default y
11 help 11 help
12 Say Y here if you have any input device (mouse, keyboard, tablet, 12 Say Y here if you have any input device (mouse, keyboard, tablet,
@@ -67,7 +67,7 @@ config INPUT_SPARSEKMAP
67comment "Userland interfaces" 67comment "Userland interfaces"
68 68
69config INPUT_MOUSEDEV 69config INPUT_MOUSEDEV
70 tristate "Mouse interface" if EMBEDDED 70 tristate "Mouse interface" if EXPERT
71 default y 71 default y
72 help 72 help
73 Say Y here if you want your mouse to be accessible as char devices 73 Say Y here if you want your mouse to be accessible as char devices
@@ -150,7 +150,7 @@ config INPUT_EVBUG
150 module will be called evbug. 150 module will be called evbug.
151 151
152config INPUT_APMPOWER 152config INPUT_APMPOWER
153 tristate "Input Power Event -> APM Bridge" if EMBEDDED 153 tristate "Input Power Event -> APM Bridge" if EXPERT
154 depends on INPUT && APM_EMULATION 154 depends on INPUT && APM_EMULATION
155 help 155 help
156 Say Y here if you want suspend key events to trigger a user 156 Say Y here if you want suspend key events to trigger a user
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 7b3c0b8fa432..c7a92028f450 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
2# Input core configuration 2# Input core configuration
3# 3#
4menuconfig INPUT_KEYBOARD 4menuconfig INPUT_KEYBOARD
5 bool "Keyboards" if EMBEDDED || !X86 5 bool "Keyboards" if EXPERT || !X86
6 default y 6 default y
7 help 7 help
8 Say Y here, and a list of supported keyboards will be displayed. 8 Say Y here, and a list of supported keyboards will be displayed.
@@ -57,7 +57,7 @@ config KEYBOARD_ATARI
57 module will be called atakbd. 57 module will be called atakbd.
58 58
59config KEYBOARD_ATKBD 59config KEYBOARD_ATKBD
60 tristate "AT keyboard" if EMBEDDED || !X86 60 tristate "AT keyboard" if EXPERT || !X86
61 default y 61 default y
62 select SERIO 62 select SERIO
63 select SERIO_LIBPS2 63 select SERIO_LIBPS2
@@ -343,6 +343,16 @@ config KEYBOARD_NOMADIK
343 To compile this driver as a module, choose M here: the 343 To compile this driver as a module, choose M here: the
344 module will be called nmk-ske-keypad. 344 module will be called nmk-ske-keypad.
345 345
346config KEYBOARD_TEGRA
347 tristate "NVIDIA Tegra internal matrix keyboard controller support"
348 depends on ARCH_TEGRA
349 help
350 Say Y here if you want to use a matrix keyboard connected directly
351 to the internal keyboard controller on Tegra SoCs.
352
353 To compile this driver as a module, choose M here: the
354 module will be called tegra-kbc.
355
346config KEYBOARD_OPENCORES 356config KEYBOARD_OPENCORES
347 tristate "OpenCores Keyboard Controller" 357 tristate "OpenCores Keyboard Controller"
348 help 358 help
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 4e5571b72cda..468c627a2844 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
42obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o 42obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
43obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o 43obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
44obj-$(CONFIG_KEYBOARD_TC3589X) += tc3589x-keypad.o 44obj-$(CONFIG_KEYBOARD_TC3589X) += tc3589x-keypad.o
45obj-$(CONFIG_KEYBOARD_TEGRA) += tegra-kbc.o
45obj-$(CONFIG_KEYBOARD_TNETV107X) += tnetv107x-keypad.o 46obj-$(CONFIG_KEYBOARD_TNETV107X) += tnetv107x-keypad.o
46obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o 47obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
47obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o 48obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6069abe31e42..eb3006361ee4 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -322,7 +322,7 @@ static void gpio_keys_report_event(struct gpio_button_data *bdata)
322 struct gpio_keys_button *button = bdata->button; 322 struct gpio_keys_button *button = bdata->button;
323 struct input_dev *input = bdata->input; 323 struct input_dev *input = bdata->input;
324 unsigned int type = button->type ?: EV_KEY; 324 unsigned int type = button->type ?: EV_KEY;
325 int state = (gpio_get_value(button->gpio) ? 1 : 0) ^ button->active_low; 325 int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
326 326
327 input_event(input, type, button->code, !!state); 327 input_event(input, type, button->code, !!state);
328 input_sync(input); 328 input_sync(input);
@@ -410,8 +410,8 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
410 if (!button->can_disable) 410 if (!button->can_disable)
411 irqflags |= IRQF_SHARED; 411 irqflags |= IRQF_SHARED;
412 412
413 error = request_irq(irq, gpio_keys_isr, irqflags, desc, bdata); 413 error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
414 if (error) { 414 if (error < 0) {
415 dev_err(dev, "Unable to claim irq %d; error %d\n", 415 dev_err(dev, "Unable to claim irq %d; error %d\n",
416 irq, error); 416 irq, error);
417 goto fail3; 417 goto fail3;
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
new file mode 100644
index 000000000000..ac471b77c18e
--- /dev/null
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -0,0 +1,727 @@
1/*
2 * Keyboard class input driver for the NVIDIA Tegra SoC internal matrix
3 * keyboard controller
4 *
5 * Copyright (c) 2009-2011, NVIDIA Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/input.h>
24#include <linux/platform_device.h>
25#include <linux/delay.h>
26#include <linux/io.h>
27#include <linux/interrupt.h>
28#include <linux/clk.h>
29#include <linux/slab.h>
30#include <mach/clk.h>
31#include <mach/kbc.h>
32
33#define KBC_MAX_DEBOUNCE_CNT 0x3ffu
34
35/* KBC row scan time and delay for beginning the row scan. */
36#define KBC_ROW_SCAN_TIME 16
37#define KBC_ROW_SCAN_DLY 5
38
39/* KBC uses a 32KHz clock so a cycle = 1/32Khz */
40#define KBC_CYCLE_USEC 32
41
42/* KBC Registers */
43
44/* KBC Control Register */
45#define KBC_CONTROL_0 0x0
46#define KBC_FIFO_TH_CNT_SHIFT(cnt) (cnt << 14)
47#define KBC_DEBOUNCE_CNT_SHIFT(cnt) (cnt << 4)
48#define KBC_CONTROL_FIFO_CNT_INT_EN (1 << 3)
49#define KBC_CONTROL_KBC_EN (1 << 0)
50
51/* KBC Interrupt Register */
52#define KBC_INT_0 0x4
53#define KBC_INT_FIFO_CNT_INT_STATUS (1 << 2)
54
55#define KBC_ROW_CFG0_0 0x8
56#define KBC_COL_CFG0_0 0x18
57#define KBC_INIT_DLY_0 0x28
58#define KBC_RPT_DLY_0 0x2c
59#define KBC_KP_ENT0_0 0x30
60#define KBC_KP_ENT1_0 0x34
61#define KBC_ROW0_MASK_0 0x38
62
63#define KBC_ROW_SHIFT 3
64
65struct tegra_kbc {
66 void __iomem *mmio;
67 struct input_dev *idev;
68 unsigned int irq;
69 unsigned int wake_enable_rows;
70 unsigned int wake_enable_cols;
71 spinlock_t lock;
72 unsigned int repoll_dly;
73 unsigned long cp_dly_jiffies;
74 const struct tegra_kbc_platform_data *pdata;
75 unsigned short keycode[KBC_MAX_KEY];
76 unsigned short current_keys[KBC_MAX_KPENT];
77 unsigned int num_pressed_keys;
78 struct timer_list timer;
79 struct clk *clk;
80};
81
82static const u32 tegra_kbc_default_keymap[] = {
83 KEY(0, 2, KEY_W),
84 KEY(0, 3, KEY_S),
85 KEY(0, 4, KEY_A),
86 KEY(0, 5, KEY_Z),
87 KEY(0, 7, KEY_FN),
88
89 KEY(1, 7, KEY_LEFTMETA),
90
91 KEY(2, 6, KEY_RIGHTALT),
92 KEY(2, 7, KEY_LEFTALT),
93
94 KEY(3, 0, KEY_5),
95 KEY(3, 1, KEY_4),
96 KEY(3, 2, KEY_R),
97 KEY(3, 3, KEY_E),
98 KEY(3, 4, KEY_F),
99 KEY(3, 5, KEY_D),
100 KEY(3, 6, KEY_X),
101
102 KEY(4, 0, KEY_7),
103 KEY(4, 1, KEY_6),
104 KEY(4, 2, KEY_T),
105 KEY(4, 3, KEY_H),
106 KEY(4, 4, KEY_G),
107 KEY(4, 5, KEY_V),
108 KEY(4, 6, KEY_C),
109 KEY(4, 7, KEY_SPACE),
110
111 KEY(5, 0, KEY_9),
112 KEY(5, 1, KEY_8),
113 KEY(5, 2, KEY_U),
114 KEY(5, 3, KEY_Y),
115 KEY(5, 4, KEY_J),
116 KEY(5, 5, KEY_N),
117 KEY(5, 6, KEY_B),
118 KEY(5, 7, KEY_BACKSLASH),
119
120 KEY(6, 0, KEY_MINUS),
121 KEY(6, 1, KEY_0),
122 KEY(6, 2, KEY_O),
123 KEY(6, 3, KEY_I),
124 KEY(6, 4, KEY_L),
125 KEY(6, 5, KEY_K),
126 KEY(6, 6, KEY_COMMA),
127 KEY(6, 7, KEY_M),
128
129 KEY(7, 1, KEY_EQUAL),
130 KEY(7, 2, KEY_RIGHTBRACE),
131 KEY(7, 3, KEY_ENTER),
132 KEY(7, 7, KEY_MENU),
133
134 KEY(8, 4, KEY_RIGHTSHIFT),
135 KEY(8, 5, KEY_LEFTSHIFT),
136
137 KEY(9, 5, KEY_RIGHTCTRL),
138 KEY(9, 7, KEY_LEFTCTRL),
139
140 KEY(11, 0, KEY_LEFTBRACE),
141 KEY(11, 1, KEY_P),
142 KEY(11, 2, KEY_APOSTROPHE),
143 KEY(11, 3, KEY_SEMICOLON),
144 KEY(11, 4, KEY_SLASH),
145 KEY(11, 5, KEY_DOT),
146
147 KEY(12, 0, KEY_F10),
148 KEY(12, 1, KEY_F9),
149 KEY(12, 2, KEY_BACKSPACE),
150 KEY(12, 3, KEY_3),
151 KEY(12, 4, KEY_2),
152 KEY(12, 5, KEY_UP),
153 KEY(12, 6, KEY_PRINT),
154 KEY(12, 7, KEY_PAUSE),
155
156 KEY(13, 0, KEY_INSERT),
157 KEY(13, 1, KEY_DELETE),
158 KEY(13, 3, KEY_PAGEUP),
159 KEY(13, 4, KEY_PAGEDOWN),
160 KEY(13, 5, KEY_RIGHT),
161 KEY(13, 6, KEY_DOWN),
162 KEY(13, 7, KEY_LEFT),
163
164 KEY(14, 0, KEY_F11),
165 KEY(14, 1, KEY_F12),
166 KEY(14, 2, KEY_F8),
167 KEY(14, 3, KEY_Q),
168 KEY(14, 4, KEY_F4),
169 KEY(14, 5, KEY_F3),
170 KEY(14, 6, KEY_1),
171 KEY(14, 7, KEY_F7),
172
173 KEY(15, 0, KEY_ESC),
174 KEY(15, 1, KEY_GRAVE),
175 KEY(15, 2, KEY_F5),
176 KEY(15, 3, KEY_TAB),
177 KEY(15, 4, KEY_F1),
178 KEY(15, 5, KEY_F2),
179 KEY(15, 6, KEY_CAPSLOCK),
180 KEY(15, 7, KEY_F6),
181};
182
183static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
184 .keymap = tegra_kbc_default_keymap,
185 .keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
186};
187
188static void tegra_kbc_report_released_keys(struct input_dev *input,
189 unsigned short old_keycodes[],
190 unsigned int old_num_keys,
191 unsigned short new_keycodes[],
192 unsigned int new_num_keys)
193{
194 unsigned int i, j;
195
196 for (i = 0; i < old_num_keys; i++) {
197 for (j = 0; j < new_num_keys; j++)
198 if (old_keycodes[i] == new_keycodes[j])
199 break;
200
201 if (j == new_num_keys)
202 input_report_key(input, old_keycodes[i], 0);
203 }
204}
205
206static void tegra_kbc_report_pressed_keys(struct input_dev *input,
207 unsigned char scancodes[],
208 unsigned short keycodes[],
209 unsigned int num_pressed_keys)
210{
211 unsigned int i;
212
213 for (i = 0; i < num_pressed_keys; i++) {
214 input_event(input, EV_MSC, MSC_SCAN, scancodes[i]);
215 input_report_key(input, keycodes[i], 1);
216 }
217}
218
219static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
220{
221 unsigned char scancodes[KBC_MAX_KPENT];
222 unsigned short keycodes[KBC_MAX_KPENT];
223 u32 val = 0;
224 unsigned int i;
225 unsigned int num_down = 0;
226 unsigned long flags;
227
228 spin_lock_irqsave(&kbc->lock, flags);
229 for (i = 0; i < KBC_MAX_KPENT; i++) {
230 if ((i % 4) == 0)
231 val = readl(kbc->mmio + KBC_KP_ENT0_0 + i);
232
233 if (val & 0x80) {
234 unsigned int col = val & 0x07;
235 unsigned int row = (val >> 3) & 0x0f;
236 unsigned char scancode =
237 MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
238
239 scancodes[num_down] = scancode;
240 keycodes[num_down++] = kbc->keycode[scancode];
241 }
242
243 val >>= 8;
244 }
245 spin_unlock_irqrestore(&kbc->lock, flags);
246
247 tegra_kbc_report_released_keys(kbc->idev,
248 kbc->current_keys, kbc->num_pressed_keys,
249 keycodes, num_down);
250 tegra_kbc_report_pressed_keys(kbc->idev, scancodes, keycodes, num_down);
251 input_sync(kbc->idev);
252
253 memcpy(kbc->current_keys, keycodes, sizeof(kbc->current_keys));
254 kbc->num_pressed_keys = num_down;
255}
256
257static void tegra_kbc_keypress_timer(unsigned long data)
258{
259 struct tegra_kbc *kbc = (struct tegra_kbc *)data;
260 unsigned long flags;
261 u32 val;
262 unsigned int i;
263
264 val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf;
265 if (val) {
266 unsigned long dly;
267
268 tegra_kbc_report_keys(kbc);
269
270 /*
271 * If more than one keys are pressed we need not wait
272 * for the repoll delay.
273 */
274 dly = (val == 1) ? kbc->repoll_dly : 1;
275 mod_timer(&kbc->timer, jiffies + msecs_to_jiffies(dly));
276 } else {
277 /* Release any pressed keys and exit the polling loop */
278 for (i = 0; i < kbc->num_pressed_keys; i++)
279 input_report_key(kbc->idev, kbc->current_keys[i], 0);
280 input_sync(kbc->idev);
281
282 kbc->num_pressed_keys = 0;
283
284 /* All keys are released so enable the keypress interrupt */
285 spin_lock_irqsave(&kbc->lock, flags);
286 val = readl(kbc->mmio + KBC_CONTROL_0);
287 val |= KBC_CONTROL_FIFO_CNT_INT_EN;
288 writel(val, kbc->mmio + KBC_CONTROL_0);
289 spin_unlock_irqrestore(&kbc->lock, flags);
290 }
291}
292
293static irqreturn_t tegra_kbc_isr(int irq, void *args)
294{
295 struct tegra_kbc *kbc = args;
296 u32 val, ctl;
297
298 /*
299 * Until all keys are released, defer further processing to
300 * the polling loop in tegra_kbc_keypress_timer
301 */
302 ctl = readl(kbc->mmio + KBC_CONTROL_0);
303 ctl &= ~KBC_CONTROL_FIFO_CNT_INT_EN;
304 writel(ctl, kbc->mmio + KBC_CONTROL_0);
305
306 /*
307 * Quickly bail out & reenable interrupts if the fifo threshold
308 * count interrupt wasn't the interrupt source
309 */
310 val = readl(kbc->mmio + KBC_INT_0);
311 writel(val, kbc->mmio + KBC_INT_0);
312
313 if (val & KBC_INT_FIFO_CNT_INT_STATUS) {
314 /*
315 * Schedule timer to run when hardware is in continuous
316 * polling mode.
317 */
318 mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
319 } else {
320 ctl |= KBC_CONTROL_FIFO_CNT_INT_EN;
321 writel(ctl, kbc->mmio + KBC_CONTROL_0);
322 }
323
324 return IRQ_HANDLED;
325}
326
327static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
328{
329 const struct tegra_kbc_platform_data *pdata = kbc->pdata;
330 int i;
331 unsigned int rst_val;
332
333 BUG_ON(pdata->wake_cnt > KBC_MAX_KEY);
334 rst_val = (filter && pdata->wake_cnt) ? ~0 : 0;
335
336 for (i = 0; i < KBC_MAX_ROW; i++)
337 writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
338
339 if (filter) {
340 for (i = 0; i < pdata->wake_cnt; i++) {
341 u32 val, addr;
342 addr = pdata->wake_cfg[i].row * 4 + KBC_ROW0_MASK_0;
343 val = readl(kbc->mmio + addr);
344 val &= ~(1 << pdata->wake_cfg[i].col);
345 writel(val, kbc->mmio + addr);
346 }
347 }
348}
349
350static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
351{
352 const struct tegra_kbc_platform_data *pdata = kbc->pdata;
353 int i;
354
355 for (i = 0; i < KBC_MAX_GPIO; i++) {
356 u32 r_shft = 5 * (i % 6);
357 u32 c_shft = 4 * (i % 8);
358 u32 r_mask = 0x1f << r_shft;
359 u32 c_mask = 0x0f << c_shft;
360 u32 r_offs = (i / 6) * 4 + KBC_ROW_CFG0_0;
361 u32 c_offs = (i / 8) * 4 + KBC_COL_CFG0_0;
362 u32 row_cfg = readl(kbc->mmio + r_offs);
363 u32 col_cfg = readl(kbc->mmio + c_offs);
364
365 row_cfg &= ~r_mask;
366 col_cfg &= ~c_mask;
367
368 if (pdata->pin_cfg[i].is_row)
369 row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
370 else
371 col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
372
373 writel(row_cfg, kbc->mmio + r_offs);
374 writel(col_cfg, kbc->mmio + c_offs);
375 }
376}
377
378static int tegra_kbc_start(struct tegra_kbc *kbc)
379{
380 const struct tegra_kbc_platform_data *pdata = kbc->pdata;
381 unsigned long flags;
382 unsigned int debounce_cnt;
383 u32 val = 0;
384
385 clk_enable(kbc->clk);
386
387 /* Reset the KBC controller to clear all previous status.*/
388 tegra_periph_reset_assert(kbc->clk);
389 udelay(100);
390 tegra_periph_reset_deassert(kbc->clk);
391 udelay(100);
392
393 tegra_kbc_config_pins(kbc);
394 tegra_kbc_setup_wakekeys(kbc, false);
395
396 writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
397
398 /* Keyboard debounce count is maximum of 12 bits. */
399 debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
400 val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
401 val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
402 val |= KBC_CONTROL_FIFO_CNT_INT_EN; /* interrupt on FIFO threshold */
403 val |= KBC_CONTROL_KBC_EN; /* enable */
404 writel(val, kbc->mmio + KBC_CONTROL_0);
405
406 /*
407 * Compute the delay(ns) from interrupt mode to continuous polling
408 * mode so the timer routine is scheduled appropriately.
409 */
410 val = readl(kbc->mmio + KBC_INIT_DLY_0);
411 kbc->cp_dly_jiffies = usecs_to_jiffies((val & 0xfffff) * 32);
412
413 kbc->num_pressed_keys = 0;
414
415 /*
416 * Atomically clear out any remaining entries in the key FIFO
417 * and enable keyboard interrupts.
418 */
419 spin_lock_irqsave(&kbc->lock, flags);
420 while (1) {
421 val = readl(kbc->mmio + KBC_INT_0);
422 val >>= 4;
423 if (!val)
424 break;
425
426 val = readl(kbc->mmio + KBC_KP_ENT0_0);
427 val = readl(kbc->mmio + KBC_KP_ENT1_0);
428 }
429 writel(0x7, kbc->mmio + KBC_INT_0);
430 spin_unlock_irqrestore(&kbc->lock, flags);
431
432 enable_irq(kbc->irq);
433
434 return 0;
435}
436
437static void tegra_kbc_stop(struct tegra_kbc *kbc)
438{
439 unsigned long flags;
440 u32 val;
441
442 spin_lock_irqsave(&kbc->lock, flags);
443 val = readl(kbc->mmio + KBC_CONTROL_0);
444 val &= ~1;
445 writel(val, kbc->mmio + KBC_CONTROL_0);
446 spin_unlock_irqrestore(&kbc->lock, flags);
447
448 disable_irq(kbc->irq);
449 del_timer_sync(&kbc->timer);
450
451 clk_disable(kbc->clk);
452}
453
454static int tegra_kbc_open(struct input_dev *dev)
455{
456 struct tegra_kbc *kbc = input_get_drvdata(dev);
457
458 return tegra_kbc_start(kbc);
459}
460
461static void tegra_kbc_close(struct input_dev *dev)
462{
463 struct tegra_kbc *kbc = input_get_drvdata(dev);
464
465 return tegra_kbc_stop(kbc);
466}
467
468static bool __devinit
469tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
470 struct device *dev, unsigned int *num_rows)
471{
472 int i;
473
474 *num_rows = 0;
475
476 for (i = 0; i < KBC_MAX_GPIO; i++) {
477 const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i];
478
479 if (pin_cfg->is_row) {
480 if (pin_cfg->num >= KBC_MAX_ROW) {
481 dev_err(dev,
482 "pin_cfg[%d]: invalid row number %d\n",
483 i, pin_cfg->num);
484 return false;
485 }
486 (*num_rows)++;
487 } else {
488 if (pin_cfg->num >= KBC_MAX_COL) {
489 dev_err(dev,
490 "pin_cfg[%d]: invalid column number %d\n",
491 i, pin_cfg->num);
492 return false;
493 }
494 }
495 }
496
497 return true;
498}
499
500static int __devinit tegra_kbc_probe(struct platform_device *pdev)
501{
502 const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
503 const struct matrix_keymap_data *keymap_data;
504 struct tegra_kbc *kbc;
505 struct input_dev *input_dev;
506 struct resource *res;
507 int irq;
508 int err;
509 int i;
510 int num_rows = 0;
511 unsigned int debounce_cnt;
512 unsigned int scan_time_rows;
513
514 if (!pdata)
515 return -EINVAL;
516
517 if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
518 return -EINVAL;
519
520 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
521 if (!res) {
522 dev_err(&pdev->dev, "failed to get I/O memory\n");
523 return -ENXIO;
524 }
525
526 irq = platform_get_irq(pdev, 0);
527 if (irq < 0) {
528 dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
529 return -ENXIO;
530 }
531
532 kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
533 input_dev = input_allocate_device();
534 if (!kbc || !input_dev) {
535 err = -ENOMEM;
536 goto err_free_mem;
537 }
538
539 kbc->pdata = pdata;
540 kbc->idev = input_dev;
541 kbc->irq = irq;
542 spin_lock_init(&kbc->lock);
543 setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
544
545 res = request_mem_region(res->start, resource_size(res), pdev->name);
546 if (!res) {
547 dev_err(&pdev->dev, "failed to request I/O memory\n");
548 err = -EBUSY;
549 goto err_free_mem;
550 }
551
552 kbc->mmio = ioremap(res->start, resource_size(res));
553 if (!kbc->mmio) {
554 dev_err(&pdev->dev, "failed to remap I/O memory\n");
555 err = -ENXIO;
556 goto err_free_mem_region;
557 }
558
559 kbc->clk = clk_get(&pdev->dev, NULL);
560 if (IS_ERR(kbc->clk)) {
561 dev_err(&pdev->dev, "failed to get keyboard clock\n");
562 err = PTR_ERR(kbc->clk);
563 goto err_iounmap;
564 }
565
566 kbc->wake_enable_rows = 0;
567 kbc->wake_enable_cols = 0;
568 for (i = 0; i < pdata->wake_cnt; i++) {
569 kbc->wake_enable_rows |= (1 << pdata->wake_cfg[i].row);
570 kbc->wake_enable_cols |= (1 << pdata->wake_cfg[i].col);
571 }
572
573 /*
574 * The time delay between two consecutive reads of the FIFO is
575 * the sum of the repeat time and the time taken for scanning
576 * the rows. There is an additional delay before the row scanning
577 * starts. The repoll delay is computed in milliseconds.
578 */
579 debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
580 scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
581 kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
582 kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000;
583
584 input_dev->name = pdev->name;
585 input_dev->id.bustype = BUS_HOST;
586 input_dev->dev.parent = &pdev->dev;
587 input_dev->open = tegra_kbc_open;
588 input_dev->close = tegra_kbc_close;
589
590 input_set_drvdata(input_dev, kbc);
591
592 input_dev->evbit[0] = BIT_MASK(EV_KEY);
593 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
594
595 input_dev->keycode = kbc->keycode;
596 input_dev->keycodesize = sizeof(kbc->keycode[0]);
597 input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
598
599 keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
600 matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
601 input_dev->keycode, input_dev->keybit);
602
603 err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
604 pdev->name, kbc);
605 if (err) {
606 dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
607 goto err_put_clk;
608 }
609
610 disable_irq(kbc->irq);
611
612 err = input_register_device(kbc->idev);
613 if (err) {
614 dev_err(&pdev->dev, "failed to register input device\n");
615 goto err_free_irq;
616 }
617
618 platform_set_drvdata(pdev, kbc);
619 device_init_wakeup(&pdev->dev, pdata->wakeup);
620
621 return 0;
622
623err_free_irq:
624 free_irq(kbc->irq, pdev);
625err_put_clk:
626 clk_put(kbc->clk);
627err_iounmap:
628 iounmap(kbc->mmio);
629err_free_mem_region:
630 release_mem_region(res->start, resource_size(res));
631err_free_mem:
632 input_free_device(kbc->idev);
633 kfree(kbc);
634
635 return err;
636}
637
638static int __devexit tegra_kbc_remove(struct platform_device *pdev)
639{
640 struct tegra_kbc *kbc = platform_get_drvdata(pdev);
641 struct resource *res;
642
643 free_irq(kbc->irq, pdev);
644 clk_put(kbc->clk);
645
646 input_unregister_device(kbc->idev);
647 iounmap(kbc->mmio);
648 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
649 release_mem_region(res->start, resource_size(res));
650
651 kfree(kbc);
652
653 platform_set_drvdata(pdev, NULL);
654
655 return 0;
656}
657
658#ifdef CONFIG_PM_SLEEP
659static int tegra_kbc_suspend(struct device *dev)
660{
661 struct platform_device *pdev = to_platform_device(dev);
662 struct tegra_kbc *kbc = platform_get_drvdata(pdev);
663
664 if (device_may_wakeup(&pdev->dev)) {
665 tegra_kbc_setup_wakekeys(kbc, true);
666 enable_irq_wake(kbc->irq);
667 /* Forcefully clear the interrupt status */
668 writel(0x7, kbc->mmio + KBC_INT_0);
669 msleep(30);
670 } else {
671 mutex_lock(&kbc->idev->mutex);
672 if (kbc->idev->users)
673 tegra_kbc_stop(kbc);
674 mutex_unlock(&kbc->idev->mutex);
675 }
676
677 return 0;
678}
679
680static int tegra_kbc_resume(struct device *dev)
681{
682 struct platform_device *pdev = to_platform_device(dev);
683 struct tegra_kbc *kbc = platform_get_drvdata(pdev);
684 int err = 0;
685
686 if (device_may_wakeup(&pdev->dev)) {
687 disable_irq_wake(kbc->irq);
688 tegra_kbc_setup_wakekeys(kbc, false);
689 } else {
690 mutex_lock(&kbc->idev->mutex);
691 if (kbc->idev->users)
692 err = tegra_kbc_start(kbc);
693 mutex_unlock(&kbc->idev->mutex);
694 }
695
696 return err;
697}
698#endif
699
700static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
701
702static struct platform_driver tegra_kbc_driver = {
703 .probe = tegra_kbc_probe,
704 .remove = __devexit_p(tegra_kbc_remove),
705 .driver = {
706 .name = "tegra-kbc",
707 .owner = THIS_MODULE,
708 .pm = &tegra_kbc_pm_ops,
709 },
710};
711
712static void __exit tegra_kbc_exit(void)
713{
714 platform_driver_unregister(&tegra_kbc_driver);
715}
716module_exit(tegra_kbc_exit);
717
718static int __init tegra_kbc_init(void)
719{
720 return platform_driver_register(&tegra_kbc_driver);
721}
722module_init(tegra_kbc_init);
723
724MODULE_LICENSE("GPL");
725MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
726MODULE_DESCRIPTION("Tegra matrix keyboard controller driver");
727MODULE_ALIAS("platform:tegra-kbc");
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index b4a81ebfab92..c8f097a15d89 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/err.h>
17#include <linux/errno.h> 18#include <linux/errno.h>
18#include <linux/input.h> 19#include <linux/input.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
@@ -219,9 +220,9 @@ static int __devinit keypad_probe(struct platform_device *pdev)
219 } 220 }
220 221
221 kp->clk = clk_get(dev, NULL); 222 kp->clk = clk_get(dev, NULL);
222 if (!kp->clk) { 223 if (IS_ERR(kp->clk)) {
223 dev_err(dev, "cannot claim device clock\n"); 224 dev_err(dev, "cannot claim device clock\n");
224 error = -EINVAL; 225 error = PTR_ERR(kp->clk);
225 goto error_clk; 226 goto error_clk;
226 } 227 }
227 228
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 9dfd6e5f786f..1f38302a5951 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -69,11 +69,7 @@ static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned
69 } 69 }
70 70
71 if (value > 20 && value < 32767) 71 if (value > 20 && value < 32767)
72#ifndef FREQ 72 count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1;
73 count = (ixp4xx_get_board_tick_rate() / (value * 4)) - 1;
74#else
75 count = (FREQ / (value * 4)) - 1;
76#endif
77 73
78 ixp4xx_spkr_control(pin, count); 74 ixp4xx_spkr_control(pin, count);
79 75
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index bf5fd7f6a313..9c1e6ee83531 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -39,7 +39,7 @@ config MOUSE_PS2
39 module will be called psmouse. 39 module will be called psmouse.
40 40
41config MOUSE_PS2_ALPS 41config MOUSE_PS2_ALPS
42 bool "ALPS PS/2 mouse protocol extension" if EMBEDDED 42 bool "ALPS PS/2 mouse protocol extension" if EXPERT
43 default y 43 default y
44 depends on MOUSE_PS2 44 depends on MOUSE_PS2
45 help 45 help
@@ -49,7 +49,7 @@ config MOUSE_PS2_ALPS
49 If unsure, say Y. 49 If unsure, say Y.
50 50
51config MOUSE_PS2_LOGIPS2PP 51config MOUSE_PS2_LOGIPS2PP
52 bool "Logitech PS/2++ mouse protocol extension" if EMBEDDED 52 bool "Logitech PS/2++ mouse protocol extension" if EXPERT
53 default y 53 default y
54 depends on MOUSE_PS2 54 depends on MOUSE_PS2
55 help 55 help
@@ -59,7 +59,7 @@ config MOUSE_PS2_LOGIPS2PP
59 If unsure, say Y. 59 If unsure, say Y.
60 60
61config MOUSE_PS2_SYNAPTICS 61config MOUSE_PS2_SYNAPTICS
62 bool "Synaptics PS/2 mouse protocol extension" if EMBEDDED 62 bool "Synaptics PS/2 mouse protocol extension" if EXPERT
63 default y 63 default y
64 depends on MOUSE_PS2 64 depends on MOUSE_PS2
65 help 65 help
@@ -69,7 +69,7 @@ config MOUSE_PS2_SYNAPTICS
69 If unsure, say Y. 69 If unsure, say Y.
70 70
71config MOUSE_PS2_LIFEBOOK 71config MOUSE_PS2_LIFEBOOK
72 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED 72 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
73 default y 73 default y
74 depends on MOUSE_PS2 && X86 && DMI 74 depends on MOUSE_PS2 && X86 && DMI
75 help 75 help
@@ -79,7 +79,7 @@ config MOUSE_PS2_LIFEBOOK
79 If unsure, say Y. 79 If unsure, say Y.
80 80
81config MOUSE_PS2_TRACKPOINT 81config MOUSE_PS2_TRACKPOINT
82 bool "IBM Trackpoint PS/2 mouse protocol extension" if EMBEDDED 82 bool "IBM Trackpoint PS/2 mouse protocol extension" if EXPERT
83 default y 83 default y
84 depends on MOUSE_PS2 84 depends on MOUSE_PS2
85 help 85 help
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index da392c22fc6c..aa186cf6c514 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -755,23 +755,26 @@ static int synaptics_reconnect(struct psmouse *psmouse)
755{ 755{
756 struct synaptics_data *priv = psmouse->private; 756 struct synaptics_data *priv = psmouse->private;
757 struct synaptics_data old_priv = *priv; 757 struct synaptics_data old_priv = *priv;
758 int retry = 0;
759 int error;
758 760
759 psmouse_reset(psmouse); 761 do {
762 psmouse_reset(psmouse);
763 error = synaptics_detect(psmouse, 0);
764 } while (error && ++retry < 3);
760 765
761 if (synaptics_detect(psmouse, 0)) 766 if (error)
762 return -1; 767 return -1;
763 768
769 if (retry > 1)
770 printk(KERN_DEBUG "Synaptics reconnected after %d tries\n",
771 retry);
772
764 if (synaptics_query_hardware(psmouse)) { 773 if (synaptics_query_hardware(psmouse)) {
765 printk(KERN_ERR "Unable to query Synaptics hardware.\n"); 774 printk(KERN_ERR "Unable to query Synaptics hardware.\n");
766 return -1; 775 return -1;
767 } 776 }
768 777
769 if (old_priv.identity != priv->identity ||
770 old_priv.model_id != priv->model_id ||
771 old_priv.capabilities != priv->capabilities ||
772 old_priv.ext_cap != priv->ext_cap)
773 return -1;
774
775 if (synaptics_set_absolute_mode(psmouse)) { 778 if (synaptics_set_absolute_mode(psmouse)) {
776 printk(KERN_ERR "Unable to initialize Synaptics hardware.\n"); 779 printk(KERN_ERR "Unable to initialize Synaptics hardware.\n");
777 return -1; 780 return -1;
@@ -782,6 +785,19 @@ static int synaptics_reconnect(struct psmouse *psmouse)
782 return -1; 785 return -1;
783 } 786 }
784 787
788 if (old_priv.identity != priv->identity ||
789 old_priv.model_id != priv->model_id ||
790 old_priv.capabilities != priv->capabilities ||
791 old_priv.ext_cap != priv->ext_cap) {
792 printk(KERN_ERR "Synaptics hardware appears to be different: "
793 "id(%ld-%ld), model(%ld-%ld), caps(%lx-%lx), ext(%lx-%lx).\n",
794 old_priv.identity, priv->identity,
795 old_priv.model_id, priv->model_id,
796 old_priv.capabilities, priv->capabilities,
797 old_priv.ext_cap, priv->ext_cap);
798 return -1;
799 }
800
785 return 0; 801 return 0;
786} 802}
787 803
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 307eef77a172..55f2c2293ec6 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
2# Input core configuration 2# Input core configuration
3# 3#
4config SERIO 4config SERIO
5 tristate "Serial I/O support" if EMBEDDED || !X86 5 tristate "Serial I/O support" if EXPERT || !X86
6 default y 6 default y
7 help 7 help
8 Say Yes here if you have any input device that uses serial I/O to 8 Say Yes here if you have any input device that uses serial I/O to
@@ -19,7 +19,7 @@ config SERIO
19if SERIO 19if SERIO
20 20
21config SERIO_I8042 21config SERIO_I8042
22 tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86 22 tristate "i8042 PC Keyboard controller" if EXPERT || !X86
23 default y 23 default y
24 depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ 24 depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
25 (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN 25 (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
@@ -168,7 +168,7 @@ config SERIO_MACEPS2
168 module will be called maceps2. 168 module will be called maceps2.
169 169
170config SERIO_LIBPS2 170config SERIO_LIBPS2
171 tristate "PS/2 driver library" if EMBEDDED 171 tristate "PS/2 driver library" if EXPERT
172 depends on SERIO_I8042 || SERIO_I8042=n 172 depends on SERIO_I8042 || SERIO_I8042=n
173 help 173 help
174 Say Y here if you are using a driver for device connected 174 Say Y here if you are using a driver for device connected
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 448c7724beb9..852816567241 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -111,9 +111,11 @@ static void ct82c710_close(struct serio *serio)
111static int ct82c710_open(struct serio *serio) 111static int ct82c710_open(struct serio *serio)
112{ 112{
113 unsigned char status; 113 unsigned char status;
114 int err;
114 115
115 if (request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL)) 116 err = request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL);
116 return -1; 117 if (err)
118 return err;
117 119
118 status = inb_p(CT82C710_STATUS); 120 status = inb_p(CT82C710_STATUS);
119 121
@@ -131,7 +133,7 @@ static int ct82c710_open(struct serio *serio)
131 status &= ~(CT82C710_ENABLE | CT82C710_INTS_ON); 133 status &= ~(CT82C710_ENABLE | CT82C710_INTS_ON);
132 outb_p(status, CT82C710_STATUS); 134 outb_p(status, CT82C710_STATUS);
133 free_irq(CT82C710_IRQ, NULL); 135 free_irq(CT82C710_IRQ, NULL);
134 return -1; 136 return -EBUSY;
135 } 137 }
136 138
137 return 0; 139 return 0;
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 6e362de3f412..8755f5f3ad37 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -116,14 +116,15 @@ static void serport_ldisc_close(struct tty_struct *tty)
116 116
117/* 117/*
118 * serport_ldisc_receive() is called by the low level tty driver when characters 118 * serport_ldisc_receive() is called by the low level tty driver when characters
119 * are ready for us. We forward the characters, one by one to the 'interrupt' 119 * are ready for us. We forward the characters and flags, one by one to the
120 * routine. 120 * 'interrupt' routine.
121 */ 121 */
122 122
123static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) 123static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
124{ 124{
125 struct serport *serport = (struct serport*) tty->disc_data; 125 struct serport *serport = (struct serport*) tty->disc_data;
126 unsigned long flags; 126 unsigned long flags;
127 unsigned int ch_flags;
127 int i; 128 int i;
128 129
129 spin_lock_irqsave(&serport->lock, flags); 130 spin_lock_irqsave(&serport->lock, flags);
@@ -131,8 +132,23 @@ static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *c
131 if (!test_bit(SERPORT_ACTIVE, &serport->flags)) 132 if (!test_bit(SERPORT_ACTIVE, &serport->flags))
132 goto out; 133 goto out;
133 134
134 for (i = 0; i < count; i++) 135 for (i = 0; i < count; i++) {
135 serio_interrupt(serport->serio, cp[i], 0); 136 switch (fp[i]) {
137 case TTY_FRAME:
138 ch_flags = SERIO_FRAME;
139 break;
140
141 case TTY_PARITY:
142 ch_flags = SERIO_PARITY;
143 break;
144
145 default:
146 ch_flags = 0;
147 break;
148 }
149
150 serio_interrupt(serport->serio, cp[i], ch_flags);
151 }
136 152
137out: 153out:
138 spin_unlock_irqrestore(&serport->lock, flags); 154 spin_unlock_irqrestore(&serport->lock, flags);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index a29a7812bd46..7729e547ba65 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -201,6 +201,7 @@ int sparse_keymap_setup(struct input_dev *dev,
201 break; 201 break;
202 202
203 case KE_SW: 203 case KE_SW:
204 case KE_VSW:
204 __set_bit(EV_SW, dev->evbit); 205 __set_bit(EV_SW, dev->evbit);
205 __set_bit(entry->sw.code, dev->swbit); 206 __set_bit(entry->sw.code, dev->swbit);
206 break; 207 break;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 518782999fea..367fa82a607e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1101,6 +1101,13 @@ void wacom_setup_device_quirks(struct wacom_features *features)
1101 } 1101 }
1102} 1102}
1103 1103
1104static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
1105 unsigned int physical_max)
1106{
1107 /* Touch physical dimensions are in 100th of mm */
1108 return (logical_max * 100) / physical_max;
1109}
1110
1104void wacom_setup_input_capabilities(struct input_dev *input_dev, 1111void wacom_setup_input_capabilities(struct input_dev *input_dev,
1105 struct wacom_wac *wacom_wac) 1112 struct wacom_wac *wacom_wac)
1106{ 1113{
@@ -1228,8 +1235,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1228 case TABLETPC: 1235 case TABLETPC:
1229 if (features->device_type == BTN_TOOL_DOUBLETAP || 1236 if (features->device_type == BTN_TOOL_DOUBLETAP ||
1230 features->device_type == BTN_TOOL_TRIPLETAP) { 1237 features->device_type == BTN_TOOL_TRIPLETAP) {
1231 input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0); 1238 input_abs_set_res(input_dev, ABS_X,
1232 input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0); 1239 wacom_calculate_touch_res(features->x_max,
1240 features->x_phy));
1241 input_abs_set_res(input_dev, ABS_Y,
1242 wacom_calculate_touch_res(features->y_max,
1243 features->y_phy));
1233 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); 1244 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1234 } 1245 }
1235 1246
@@ -1272,6 +1283,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1272 input_set_abs_params(input_dev, ABS_MT_PRESSURE, 1283 input_set_abs_params(input_dev, ABS_MT_PRESSURE,
1273 0, features->pressure_max, 1284 0, features->pressure_max,
1274 features->pressure_fuzz, 0); 1285 features->pressure_fuzz, 0);
1286 input_abs_set_res(input_dev, ABS_X,
1287 wacom_calculate_touch_res(features->x_max,
1288 features->x_phy));
1289 input_abs_set_res(input_dev, ABS_Y,
1290 wacom_calculate_touch_res(features->y_max,
1291 features->y_phy));
1275 } else if (features->device_type == BTN_TOOL_PEN) { 1292 } else if (features->device_type == BTN_TOOL_PEN) {
1276 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); 1293 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
1277 __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1294 __set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1426,6 +1443,10 @@ static struct wacom_features wacom_features_0xD3 =
1426 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; 1443 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
1427static const struct wacom_features wacom_features_0xD4 = 1444static const struct wacom_features wacom_features_0xD4 =
1428 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, 63, BAMBOO_PT }; 1445 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, 63, BAMBOO_PT };
1446static struct wacom_features wacom_features_0xD6 =
1447 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
1448static struct wacom_features wacom_features_0xD7 =
1449 { "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
1429static struct wacom_features wacom_features_0xD8 = 1450static struct wacom_features wacom_features_0xD8 =
1430 { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; 1451 { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
1431static struct wacom_features wacom_features_0xDA = 1452static struct wacom_features wacom_features_0xDA =
@@ -1507,6 +1528,8 @@ const struct usb_device_id wacom_ids[] = {
1507 { USB_DEVICE_WACOM(0xD2) }, 1528 { USB_DEVICE_WACOM(0xD2) },
1508 { USB_DEVICE_WACOM(0xD3) }, 1529 { USB_DEVICE_WACOM(0xD3) },
1509 { USB_DEVICE_WACOM(0xD4) }, 1530 { USB_DEVICE_WACOM(0xD4) },
1531 { USB_DEVICE_WACOM(0xD6) },
1532 { USB_DEVICE_WACOM(0xD7) },
1510 { USB_DEVICE_WACOM(0xD8) }, 1533 { USB_DEVICE_WACOM(0xD8) },
1511 { USB_DEVICE_WACOM(0xDA) }, 1534 { USB_DEVICE_WACOM(0xDA) },
1512 { USB_DEVICE_WACOM(0xDB) }, 1535 { USB_DEVICE_WACOM(0xDB) },
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 0c9f4b158ff0..61834ae282e1 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -540,62 +540,62 @@ config TOUCHSCREEN_MC13783
540 540
541config TOUCHSCREEN_USB_EGALAX 541config TOUCHSCREEN_USB_EGALAX
542 default y 542 default y
543 bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED 543 bool "eGalax, eTurboTouch CT-410/510/700 device support" if EXPERT
544 depends on TOUCHSCREEN_USB_COMPOSITE 544 depends on TOUCHSCREEN_USB_COMPOSITE
545 545
546config TOUCHSCREEN_USB_PANJIT 546config TOUCHSCREEN_USB_PANJIT
547 default y 547 default y
548 bool "PanJit device support" if EMBEDDED 548 bool "PanJit device support" if EXPERT
549 depends on TOUCHSCREEN_USB_COMPOSITE 549 depends on TOUCHSCREEN_USB_COMPOSITE
550 550
551config TOUCHSCREEN_USB_3M 551config TOUCHSCREEN_USB_3M
552 default y 552 default y
553 bool "3M/Microtouch EX II series device support" if EMBEDDED 553 bool "3M/Microtouch EX II series device support" if EXPERT
554 depends on TOUCHSCREEN_USB_COMPOSITE 554 depends on TOUCHSCREEN_USB_COMPOSITE
555 555
556config TOUCHSCREEN_USB_ITM 556config TOUCHSCREEN_USB_ITM
557 default y 557 default y
558 bool "ITM device support" if EMBEDDED 558 bool "ITM device support" if EXPERT
559 depends on TOUCHSCREEN_USB_COMPOSITE 559 depends on TOUCHSCREEN_USB_COMPOSITE
560 560
561config TOUCHSCREEN_USB_ETURBO 561config TOUCHSCREEN_USB_ETURBO
562 default y 562 default y
563 bool "eTurboTouch (non-eGalax compatible) device support" if EMBEDDED 563 bool "eTurboTouch (non-eGalax compatible) device support" if EXPERT
564 depends on TOUCHSCREEN_USB_COMPOSITE 564 depends on TOUCHSCREEN_USB_COMPOSITE
565 565
566config TOUCHSCREEN_USB_GUNZE 566config TOUCHSCREEN_USB_GUNZE
567 default y 567 default y
568 bool "Gunze AHL61 device support" if EMBEDDED 568 bool "Gunze AHL61 device support" if EXPERT
569 depends on TOUCHSCREEN_USB_COMPOSITE 569 depends on TOUCHSCREEN_USB_COMPOSITE
570 570
571config TOUCHSCREEN_USB_DMC_TSC10 571config TOUCHSCREEN_USB_DMC_TSC10
572 default y 572 default y
573 bool "DMC TSC-10/25 device support" if EMBEDDED 573 bool "DMC TSC-10/25 device support" if EXPERT
574 depends on TOUCHSCREEN_USB_COMPOSITE 574 depends on TOUCHSCREEN_USB_COMPOSITE
575 575
576config TOUCHSCREEN_USB_IRTOUCH 576config TOUCHSCREEN_USB_IRTOUCH
577 default y 577 default y
578 bool "IRTOUCHSYSTEMS/UNITOP device support" if EMBEDDED 578 bool "IRTOUCHSYSTEMS/UNITOP device support" if EXPERT
579 depends on TOUCHSCREEN_USB_COMPOSITE 579 depends on TOUCHSCREEN_USB_COMPOSITE
580 580
581config TOUCHSCREEN_USB_IDEALTEK 581config TOUCHSCREEN_USB_IDEALTEK
582 default y 582 default y
583 bool "IdealTEK URTC1000 device support" if EMBEDDED 583 bool "IdealTEK URTC1000 device support" if EXPERT
584 depends on TOUCHSCREEN_USB_COMPOSITE 584 depends on TOUCHSCREEN_USB_COMPOSITE
585 585
586config TOUCHSCREEN_USB_GENERAL_TOUCH 586config TOUCHSCREEN_USB_GENERAL_TOUCH
587 default y 587 default y
588 bool "GeneralTouch Touchscreen device support" if EMBEDDED 588 bool "GeneralTouch Touchscreen device support" if EXPERT
589 depends on TOUCHSCREEN_USB_COMPOSITE 589 depends on TOUCHSCREEN_USB_COMPOSITE
590 590
591config TOUCHSCREEN_USB_GOTOP 591config TOUCHSCREEN_USB_GOTOP
592 default y 592 default y
593 bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EMBEDDED 593 bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EXPERT
594 depends on TOUCHSCREEN_USB_COMPOSITE 594 depends on TOUCHSCREEN_USB_COMPOSITE
595 595
596config TOUCHSCREEN_USB_JASTEC 596config TOUCHSCREEN_USB_JASTEC
597 default y 597 default y
598 bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EMBEDDED 598 bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
599 depends on TOUCHSCREEN_USB_COMPOSITE 599 depends on TOUCHSCREEN_USB_COMPOSITE
600 600
601config TOUCHSCREEN_USB_E2I 601config TOUCHSCREEN_USB_E2I
@@ -605,17 +605,17 @@ config TOUCHSCREEN_USB_E2I
605 605
606config TOUCHSCREEN_USB_ZYTRONIC 606config TOUCHSCREEN_USB_ZYTRONIC
607 default y 607 default y
608 bool "Zytronic controller" if EMBEDDED 608 bool "Zytronic controller" if EXPERT
609 depends on TOUCHSCREEN_USB_COMPOSITE 609 depends on TOUCHSCREEN_USB_COMPOSITE
610 610
611config TOUCHSCREEN_USB_ETT_TC45USB 611config TOUCHSCREEN_USB_ETT_TC45USB
612 default y 612 default y
613 bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EMBEDDED 613 bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EXPERT
614 depends on TOUCHSCREEN_USB_COMPOSITE 614 depends on TOUCHSCREEN_USB_COMPOSITE
615 615
616config TOUCHSCREEN_USB_NEXIO 616config TOUCHSCREEN_USB_NEXIO
617 default y 617 default y
618 bool "NEXIO/iNexio device support" if EMBEDDED 618 bool "NEXIO/iNexio device support" if EXPERT
619 depends on TOUCHSCREEN_USB_COMPOSITE 619 depends on TOUCHSCREEN_USB_COMPOSITE
620 620
621config TOUCHSCREEN_TOUCHIT213 621config TOUCHSCREEN_TOUCHIT213
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index f7fa9ef4cd65..1507ce108d5b 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -12,6 +12,7 @@
12#include <linux/input.h> 12#include <linux/input.h>
13#include <linux/input/bu21013.h> 13#include <linux/input/bu21013.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/regulator/consumer.h>
15 16
16#define PEN_DOWN_INTR 0 17#define PEN_DOWN_INTR 0
17#define MAX_FINGERS 2 18#define MAX_FINGERS 2
@@ -139,6 +140,7 @@
139 * @chip: pointer to the touch panel controller 140 * @chip: pointer to the touch panel controller
140 * @in_dev: pointer to the input device structure 141 * @in_dev: pointer to the input device structure
141 * @intr_pin: interrupt pin value 142 * @intr_pin: interrupt pin value
143 * @regulator: pointer to the Regulator used for touch screen
142 * 144 *
143 * Touch panel device data structure 145 * Touch panel device data structure
144 */ 146 */
@@ -149,6 +151,7 @@ struct bu21013_ts_data {
149 const struct bu21013_platform_device *chip; 151 const struct bu21013_platform_device *chip;
150 struct input_dev *in_dev; 152 struct input_dev *in_dev;
151 unsigned int intr_pin; 153 unsigned int intr_pin;
154 struct regulator *regulator;
152}; 155};
153 156
154/** 157/**
@@ -456,6 +459,20 @@ static int __devinit bu21013_probe(struct i2c_client *client,
456 bu21013_data->in_dev = in_dev; 459 bu21013_data->in_dev = in_dev;
457 bu21013_data->chip = pdata; 460 bu21013_data->chip = pdata;
458 bu21013_data->client = client; 461 bu21013_data->client = client;
462
463 bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
464 if (IS_ERR(bu21013_data->regulator)) {
465 dev_err(&client->dev, "regulator_get failed\n");
466 error = PTR_ERR(bu21013_data->regulator);
467 goto err_free_mem;
468 }
469
470 error = regulator_enable(bu21013_data->regulator);
471 if (error < 0) {
472 dev_err(&client->dev, "regulator enable failed\n");
473 goto err_put_regulator;
474 }
475
459 bu21013_data->touch_stopped = false; 476 bu21013_data->touch_stopped = false;
460 init_waitqueue_head(&bu21013_data->wait); 477 init_waitqueue_head(&bu21013_data->wait);
461 478
@@ -464,7 +481,7 @@ static int __devinit bu21013_probe(struct i2c_client *client,
464 error = pdata->cs_en(pdata->cs_pin); 481 error = pdata->cs_en(pdata->cs_pin);
465 if (error < 0) { 482 if (error < 0) {
466 dev_err(&client->dev, "chip init failed\n"); 483 dev_err(&client->dev, "chip init failed\n");
467 goto err_free_mem; 484 goto err_disable_regulator;
468 } 485 }
469 } 486 }
470 487
@@ -485,9 +502,9 @@ static int __devinit bu21013_probe(struct i2c_client *client,
485 __set_bit(EV_ABS, in_dev->evbit); 502 __set_bit(EV_ABS, in_dev->evbit);
486 503
487 input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0, 504 input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
488 pdata->x_max_res, 0, 0); 505 pdata->touch_x_max, 0, 0);
489 input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0, 506 input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
490 pdata->y_max_res, 0, 0); 507 pdata->touch_y_max, 0, 0);
491 input_set_drvdata(in_dev, bu21013_data); 508 input_set_drvdata(in_dev, bu21013_data);
492 509
493 error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq, 510 error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
@@ -513,6 +530,10 @@ err_free_irq:
513 bu21013_free_irq(bu21013_data); 530 bu21013_free_irq(bu21013_data);
514err_cs_disable: 531err_cs_disable:
515 pdata->cs_dis(pdata->cs_pin); 532 pdata->cs_dis(pdata->cs_pin);
533err_disable_regulator:
534 regulator_disable(bu21013_data->regulator);
535err_put_regulator:
536 regulator_put(bu21013_data->regulator);
516err_free_mem: 537err_free_mem:
517 input_free_device(in_dev); 538 input_free_device(in_dev);
518 kfree(bu21013_data); 539 kfree(bu21013_data);
@@ -535,6 +556,10 @@ static int __devexit bu21013_remove(struct i2c_client *client)
535 bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin); 556 bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
536 557
537 input_unregister_device(bu21013_data->in_dev); 558 input_unregister_device(bu21013_data->in_dev);
559
560 regulator_disable(bu21013_data->regulator);
561 regulator_put(bu21013_data->regulator);
562
538 kfree(bu21013_data); 563 kfree(bu21013_data);
539 564
540 device_init_wakeup(&client->dev, false); 565 device_init_wakeup(&client->dev, false);
@@ -561,6 +586,8 @@ static int bu21013_suspend(struct device *dev)
561 else 586 else
562 disable_irq(bu21013_data->chip->irq); 587 disable_irq(bu21013_data->chip->irq);
563 588
589 regulator_disable(bu21013_data->regulator);
590
564 return 0; 591 return 0;
565} 592}
566 593
@@ -577,6 +604,12 @@ static int bu21013_resume(struct device *dev)
577 struct i2c_client *client = bu21013_data->client; 604 struct i2c_client *client = bu21013_data->client;
578 int retval; 605 int retval;
579 606
607 retval = regulator_enable(bu21013_data->regulator);
608 if (retval < 0) {
609 dev_err(&client->dev, "bu21013 regulator enable failed\n");
610 return retval;
611 }
612
580 retval = bu21013_init_chip(bu21013_data); 613 retval = bu21013_init_chip(bu21013_data);
581 if (retval < 0) { 614 if (retval < 0) {
582 dev_err(&client->dev, "bu21013 controller config failed\n"); 615 dev_err(&client->dev, "bu21013 controller config failed\n");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index cf1dba2e267c..22a3411e93c5 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/err.h>
17#include <linux/errno.h> 18#include <linux/errno.h>
18#include <linux/input.h> 19#include <linux/input.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
@@ -289,9 +290,9 @@ static int __devinit tsc_probe(struct platform_device *pdev)
289 } 290 }
290 291
291 ts->clk = clk_get(dev, NULL); 292 ts->clk = clk_get(dev, NULL);
292 if (!ts->clk) { 293 if (IS_ERR(ts->clk)) {
293 dev_err(dev, "cannot claim device clock\n"); 294 dev_err(dev, "cannot claim device clock\n");
294 error = -EINVAL; 295 error = PTR_ERR(ts->clk);
295 goto error_clk; 296 goto error_clk;
296 } 297 }
297 298
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h
index 729df4089385..18b801ad97a4 100644
--- a/drivers/isdn/hysdn/hysdn_defs.h
+++ b/drivers/isdn/hysdn/hysdn_defs.h
@@ -227,7 +227,6 @@ extern hysdn_card *card_root; /* pointer to first card */
227/*************************/ 227/*************************/
228/* im/exported functions */ 228/* im/exported functions */
229/*************************/ 229/*************************/
230extern char *hysdn_getrev(const char *);
231 230
232/* hysdn_procconf.c */ 231/* hysdn_procconf.c */
233extern int hysdn_procconf_init(void); /* init proc config filesys */ 232extern int hysdn_procconf_init(void); /* init proc config filesys */
@@ -259,7 +258,6 @@ extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
259 258
260/* hysdn_net.c */ 259/* hysdn_net.c */
261extern unsigned int hynet_enable; 260extern unsigned int hynet_enable;
262extern char *hysdn_net_revision;
263extern int hysdn_net_create(hysdn_card *); /* create a new net device */ 261extern int hysdn_net_create(hysdn_card *); /* create a new net device */
264extern int hysdn_net_release(hysdn_card *); /* delete the device */ 262extern int hysdn_net_release(hysdn_card *); /* delete the device */
265extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */ 263extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index b7cc5c2f08c6..0ab42ace1692 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -36,7 +36,6 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
36MODULE_AUTHOR("Werner Cornelius"); 36MODULE_AUTHOR("Werner Cornelius");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
40static int cardmax; /* number of found cards */ 39static int cardmax; /* number of found cards */
41hysdn_card *card_root = NULL; /* pointer to first card */ 40hysdn_card *card_root = NULL; /* pointer to first card */
42static hysdn_card *card_last = NULL; /* pointer to first card */ 41static hysdn_card *card_last = NULL; /* pointer to first card */
@@ -49,25 +48,6 @@ static hysdn_card *card_last = NULL; /* pointer to first card */
49/* Additionally newer versions may be activated without rebooting. */ 48/* Additionally newer versions may be activated without rebooting. */
50/****************************************************************************/ 49/****************************************************************************/
51 50
52/******************************************************/
53/* extract revision number from string for log output */
54/******************************************************/
55char *
56hysdn_getrev(const char *revision)
57{
58 char *rev;
59 char *p;
60
61 if ((p = strchr(revision, ':'))) {
62 rev = p + 2;
63 p = strchr(rev, '$');
64 *--p = 0;
65 } else
66 rev = "???";
67 return rev;
68}
69
70
71/****************************************************************************/ 51/****************************************************************************/
72/* init_module is called once when the module is loaded to do all necessary */ 52/* init_module is called once when the module is loaded to do all necessary */
73/* things like autodetect... */ 53/* things like autodetect... */
@@ -175,13 +155,9 @@ static int hysdn_have_procfs;
175static int __init 155static int __init
176hysdn_init(void) 156hysdn_init(void)
177{ 157{
178 char tmp[50];
179 int rc; 158 int rc;
180 159
181 strcpy(tmp, hysdn_init_revision); 160 printk(KERN_NOTICE "HYSDN: module loaded\n");
182 printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
183 strcpy(tmp, hysdn_net_revision);
184 printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
185 161
186 rc = pci_register_driver(&hysdn_pci_driver); 162 rc = pci_register_driver(&hysdn_pci_driver);
187 if (rc) 163 if (rc)
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index feec8d89d719..11f2cce26005 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -26,9 +26,6 @@
26unsigned int hynet_enable = 0xffffffff; 26unsigned int hynet_enable = 0xffffffff;
27module_param(hynet_enable, uint, 0); 27module_param(hynet_enable, uint, 0);
28 28
29/* store the actual version for log reporting */
30char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
31
32#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */ 29#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */
33 30
34/****************************************************************************/ 31/****************************************************************************/
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 96b3e39c3356..5fe83bd42061 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -23,7 +23,6 @@
23#include "hysdn_defs.h" 23#include "hysdn_defs.h"
24 24
25static DEFINE_MUTEX(hysdn_conf_mutex); 25static DEFINE_MUTEX(hysdn_conf_mutex);
26static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $";
27 26
28#define INFO_OUT_LEN 80 /* length of info line including lf */ 27#define INFO_OUT_LEN 80 /* length of info line including lf */
29 28
@@ -404,7 +403,7 @@ hysdn_procconf_init(void)
404 card = card->next; /* next entry */ 403 card = card->next; /* next entry */
405 } 404 }
406 405
407 printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision)); 406 printk(KERN_NOTICE "HYSDN: procfs initialised\n");
408 return (0); 407 return (0);
409} /* hysdn_procconf_init */ 408} /* hysdn_procconf_init */
410 409
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index f2b5bab5e6a1..1f355bb85e54 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
1627static int __init icn_init(void) 1627static int __init icn_init(void)
1628{ 1628{
1629 char *p; 1629 char *p;
1630 char rev[20]; 1630 char rev[21];
1631 1631
1632 memset(&dev, 0, sizeof(icn_dev)); 1632 memset(&dev, 0, sizeof(icn_dev));
1633 dev.memaddr = (membase & 0x0ffc000); 1633 dev.memaddr = (membase & 0x0ffc000);
@@ -1638,6 +1638,7 @@ static int __init icn_init(void)
1638 1638
1639 if ((p = strchr(revision, ':'))) { 1639 if ((p = strchr(revision, ':'))) {
1640 strncpy(rev, p + 1, 20); 1640 strncpy(rev, p + 1, 20);
1641 rev[20] = '\0';
1641 p = strchr(rev, '$'); 1642 p = strchr(rev, '$');
1642 if (p) 1643 if (p)
1643 *p = 0; 1644 *p = 0;
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index da3fa8dcdf5b..666daf77872e 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -69,6 +69,7 @@ static int led_pwm_probe(struct platform_device *pdev)
69 led_dat->pwm = pwm_request(cur_led->pwm_id, 69 led_dat->pwm = pwm_request(cur_led->pwm_id,
70 cur_led->name); 70 cur_led->name);
71 if (IS_ERR(led_dat->pwm)) { 71 if (IS_ERR(led_dat->pwm)) {
72 ret = PTR_ERR(led_dat->pwm);
72 dev_err(&pdev->dev, "unable to request PWM %d\n", 73 dev_err(&pdev->dev, "unable to request PWM %d\n",
73 cur_led->pwm_id); 74 cur_led->pwm_id);
74 goto err; 75 goto err;
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 991d93be0f44..ecc4bf3f37a9 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -99,7 +99,7 @@ static ssize_t gpio_trig_inverted_show(struct device *dev,
99 struct led_classdev *led = dev_get_drvdata(dev); 99 struct led_classdev *led = dev_get_drvdata(dev);
100 struct gpio_trig_data *gpio_data = led->trigger_data; 100 struct gpio_trig_data *gpio_data = led->trigger_data;
101 101
102 return sprintf(buf, "%s\n", gpio_data->inverted ? "yes" : "no"); 102 return sprintf(buf, "%u\n", gpio_data->inverted);
103} 103}
104 104
105static ssize_t gpio_trig_inverted_store(struct device *dev, 105static ssize_t gpio_trig_inverted_store(struct device *dev,
@@ -107,16 +107,17 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
107{ 107{
108 struct led_classdev *led = dev_get_drvdata(dev); 108 struct led_classdev *led = dev_get_drvdata(dev);
109 struct gpio_trig_data *gpio_data = led->trigger_data; 109 struct gpio_trig_data *gpio_data = led->trigger_data;
110 unsigned inverted; 110 unsigned long inverted;
111 int ret; 111 int ret;
112 112
113 ret = sscanf(buf, "%u", &inverted); 113 ret = strict_strtoul(buf, 10, &inverted);
114 if (ret < 1) { 114 if (ret < 0)
115 dev_err(dev, "invalid value\n"); 115 return ret;
116
117 if (inverted > 1)
116 return -EINVAL; 118 return -EINVAL;
117 }
118 119
119 gpio_data->inverted = !!inverted; 120 gpio_data->inverted = inverted;
120 121
121 /* After inverting, we need to update the LED. */ 122 /* After inverting, we need to update the LED. */
122 schedule_work(&gpio_data->work); 123 schedule_work(&gpio_data->work);
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 04b22128a474..d21578ee95de 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1137,7 +1137,7 @@ void free_guest_pagetable(struct lguest *lg)
1137 */ 1137 */
1138void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) 1138void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
1139{ 1139{
1140 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); 1140 pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
1141 pte_t regs_pte; 1141 pte_t regs_pte;
1142 1142
1143#ifdef CONFIG_X86_PAE 1143#ifdef CONFIG_X86_PAE
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index b4eb675a807e..9f1659c3d1f3 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
90 * meanwhile). If that's not the case, we pretend everything in the 90 * meanwhile). If that's not the case, we pretend everything in the
91 * Guest has changed. 91 * Guest has changed.
92 */ 92 */
93 if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) { 93 if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
94 __get_cpu_var(lg_last_cpu) = cpu; 94 __this_cpu_write(lg_last_cpu, cpu);
95 cpu->last_pages = pages; 95 cpu->last_pages = pages;
96 cpu->changed = CHANGED_ALL; 96 cpu->changed = CHANGED_ALL;
97 } 97 }
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 2e041fd0a00c..f3a29f264db9 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -443,7 +443,7 @@ static int fan_read_reg(int reg, unsigned char *buf, int nb)
443 tries = 0; 443 tries = 0;
444 for (;;) { 444 for (;;) {
445 nr = i2c_master_recv(fcu, buf, nb); 445 nr = i2c_master_recv(fcu, buf, nb);
446 if (nr > 0 || (nr < 0 && nr != ENODEV) || tries >= 100) 446 if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
447 break; 447 break;
448 msleep(10); 448 msleep(10);
449 ++tries; 449 ++tries;
@@ -464,7 +464,7 @@ static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
464 tries = 0; 464 tries = 0;
465 for (;;) { 465 for (;;) {
466 nw = i2c_master_send(fcu, buf, nb); 466 nw = i2c_master_send(fcu, buf, nb);
467 if (nw > 0 || (nw < 0 && nw != EIO) || tries >= 100) 467 if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
468 break; 468 break;
469 msleep(10); 469 msleep(10);
470 ++tries; 470 ++tries;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index bf1a95e31559..98d9ec85e0eb 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,6 +240,30 @@ config DM_MIRROR
240 Allow volume managers to mirror logical volumes, also 240 Allow volume managers to mirror logical volumes, also
241 needed for live data migration tools such as 'pvmove'. 241 needed for live data migration tools such as 'pvmove'.
242 242
243config DM_RAID
244 tristate "RAID 4/5/6 target (EXPERIMENTAL)"
245 depends on BLK_DEV_DM && EXPERIMENTAL
246 select MD_RAID456
247 select BLK_DEV_MD
248 ---help---
249 A dm target that supports RAID4, RAID5 and RAID6 mappings
250
251 A RAID-5 set of N drives with a capacity of C MB per drive provides
252 the capacity of C * (N - 1) MB, and protects against a failure
253 of a single drive. For a given sector (row) number, (N - 1) drives
254 contain data sectors, and one drive contains the parity protection.
255 For a RAID-4 set, the parity blocks are present on a single drive,
256 while a RAID-5 set distributes the parity across the drives in one
257 of the available parity distribution methods.
258
259 A RAID-6 set of N drives with a capacity of C MB per drive
260 provides the capacity of C * (N - 2) MB, and protects
261 against a failure of any two drives. For a given sector
262 (row) number, (N - 2) drives contain data sectors, and two
263 drives contains two independent redundancy syndromes. Like
264 RAID-5, RAID-6 distributes the syndromes across the drives
265 in one of the available parity distribution methods.
266
243config DM_LOG_USERSPACE 267config DM_LOG_USERSPACE
244 tristate "Mirror userspace logging (EXPERIMENTAL)" 268 tristate "Mirror userspace logging (EXPERIMENTAL)"
245 depends on DM_MIRROR && EXPERIMENTAL && NET 269 depends on DM_MIRROR && EXPERIMENTAL && NET
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 5e3aac41919d..d0138606c2e8 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
36obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o 36obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
37obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o 37obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
38obj-$(CONFIG_DM_ZERO) += dm-zero.o 38obj-$(CONFIG_DM_ZERO) += dm-zero.o
39obj-$(CONFIG_DM_RAID) += dm-raid.o
39 40
40ifeq ($(CONFIG_DM_UEVENT),y) 41ifeq ($(CONFIG_DM_UEVENT),y)
41dm-mod-objs += dm-uevent.o 42dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5a1ffe3527aa..9a35320fb59f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -210,11 +210,11 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
210 || test_bit(Faulty, &rdev->flags)) 210 || test_bit(Faulty, &rdev->flags))
211 continue; 211 continue;
212 212
213 target = rdev->sb_start + offset + index * (PAGE_SIZE/512); 213 target = offset + index * (PAGE_SIZE/512);
214 214
215 if (sync_page_io(rdev, target, 215 if (sync_page_io(rdev, target,
216 roundup(size, bdev_logical_block_size(rdev->bdev)), 216 roundup(size, bdev_logical_block_size(rdev->bdev)),
217 page, READ)) { 217 page, READ, true)) {
218 page->index = index; 218 page->index = index;
219 attach_page_buffers(page, NULL); /* so that free_buffer will 219 attach_page_buffers(page, NULL); /* so that free_buffer will
220 * quietly no-op */ 220 * quietly no-op */
@@ -264,14 +264,18 @@ static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
264static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) 264static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
265{ 265{
266 mdk_rdev_t *rdev = NULL; 266 mdk_rdev_t *rdev = NULL;
267 struct block_device *bdev;
267 mddev_t *mddev = bitmap->mddev; 268 mddev_t *mddev = bitmap->mddev;
268 269
269 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { 270 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
270 int size = PAGE_SIZE; 271 int size = PAGE_SIZE;
271 loff_t offset = mddev->bitmap_info.offset; 272 loff_t offset = mddev->bitmap_info.offset;
273
274 bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
275
272 if (page->index == bitmap->file_pages-1) 276 if (page->index == bitmap->file_pages-1)
273 size = roundup(bitmap->last_page_size, 277 size = roundup(bitmap->last_page_size,
274 bdev_logical_block_size(rdev->bdev)); 278 bdev_logical_block_size(bdev));
275 /* Just make sure we aren't corrupting data or 279 /* Just make sure we aren't corrupting data or
276 * metadata 280 * metadata
277 */ 281 */
@@ -1542,7 +1546,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
1542 wait_event(bitmap->mddev->recovery_wait, 1546 wait_event(bitmap->mddev->recovery_wait,
1543 atomic_read(&bitmap->mddev->recovery_active) == 0); 1547 atomic_read(&bitmap->mddev->recovery_active) == 0);
1544 1548
1545 bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; 1549 bitmap->mddev->curr_resync_completed = sector;
1546 set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags); 1550 set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1547 sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); 1551 sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
1548 s = 0; 1552 s = 0;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d5b0e4c0e702..4e054bd91664 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,10 +18,14 @@
18#include <linux/crypto.h> 18#include <linux/crypto.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/backing-dev.h> 20#include <linux/backing-dev.h>
21#include <linux/percpu.h>
21#include <asm/atomic.h> 22#include <asm/atomic.h>
22#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
23#include <asm/page.h> 24#include <asm/page.h>
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
26#include <crypto/hash.h>
27#include <crypto/md5.h>
28#include <crypto/algapi.h>
25 29
26#include <linux/device-mapper.h> 30#include <linux/device-mapper.h>
27 31
@@ -63,6 +67,7 @@ struct dm_crypt_request {
63 struct convert_context *ctx; 67 struct convert_context *ctx;
64 struct scatterlist sg_in; 68 struct scatterlist sg_in;
65 struct scatterlist sg_out; 69 struct scatterlist sg_out;
70 sector_t iv_sector;
66}; 71};
67 72
68struct crypt_config; 73struct crypt_config;
@@ -73,11 +78,13 @@ struct crypt_iv_operations {
73 void (*dtr)(struct crypt_config *cc); 78 void (*dtr)(struct crypt_config *cc);
74 int (*init)(struct crypt_config *cc); 79 int (*init)(struct crypt_config *cc);
75 int (*wipe)(struct crypt_config *cc); 80 int (*wipe)(struct crypt_config *cc);
76 int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); 81 int (*generator)(struct crypt_config *cc, u8 *iv,
82 struct dm_crypt_request *dmreq);
83 int (*post)(struct crypt_config *cc, u8 *iv,
84 struct dm_crypt_request *dmreq);
77}; 85};
78 86
79struct iv_essiv_private { 87struct iv_essiv_private {
80 struct crypto_cipher *tfm;
81 struct crypto_hash *hash_tfm; 88 struct crypto_hash *hash_tfm;
82 u8 *salt; 89 u8 *salt;
83}; 90};
@@ -86,11 +93,32 @@ struct iv_benbi_private {
86 int shift; 93 int shift;
87}; 94};
88 95
96#define LMK_SEED_SIZE 64 /* hash + 0 */
97struct iv_lmk_private {
98 struct crypto_shash *hash_tfm;
99 u8 *seed;
100};
101
89/* 102/*
90 * Crypt: maps a linear range of a block device 103 * Crypt: maps a linear range of a block device
91 * and encrypts / decrypts at the same time. 104 * and encrypts / decrypts at the same time.
92 */ 105 */
93enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; 106enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
107
108/*
109 * Duplicated per-CPU state for cipher.
110 */
111struct crypt_cpu {
112 struct ablkcipher_request *req;
113 /* ESSIV: struct crypto_cipher *essiv_tfm */
114 void *iv_private;
115 struct crypto_ablkcipher *tfms[0];
116};
117
118/*
119 * The fields in here must be read only after initialization,
120 * changing state should be in crypt_cpu.
121 */
94struct crypt_config { 122struct crypt_config {
95 struct dm_dev *dev; 123 struct dm_dev *dev;
96 sector_t start; 124 sector_t start;
@@ -108,17 +136,25 @@ struct crypt_config {
108 struct workqueue_struct *crypt_queue; 136 struct workqueue_struct *crypt_queue;
109 137
110 char *cipher; 138 char *cipher;
111 char *cipher_mode; 139 char *cipher_string;
112 140
113 struct crypt_iv_operations *iv_gen_ops; 141 struct crypt_iv_operations *iv_gen_ops;
114 union { 142 union {
115 struct iv_essiv_private essiv; 143 struct iv_essiv_private essiv;
116 struct iv_benbi_private benbi; 144 struct iv_benbi_private benbi;
145 struct iv_lmk_private lmk;
117 } iv_gen_private; 146 } iv_gen_private;
118 sector_t iv_offset; 147 sector_t iv_offset;
119 unsigned int iv_size; 148 unsigned int iv_size;
120 149
121 /* 150 /*
151 * Duplicated per cpu state. Access through
152 * per_cpu_ptr() only.
153 */
154 struct crypt_cpu __percpu *cpu;
155 unsigned tfms_count;
156
157 /*
122 * Layout of each crypto request: 158 * Layout of each crypto request:
123 * 159 *
124 * struct ablkcipher_request 160 * struct ablkcipher_request
@@ -132,11 +168,10 @@ struct crypt_config {
132 * correctly aligned. 168 * correctly aligned.
133 */ 169 */
134 unsigned int dmreq_start; 170 unsigned int dmreq_start;
135 struct ablkcipher_request *req;
136 171
137 struct crypto_ablkcipher *tfm;
138 unsigned long flags; 172 unsigned long flags;
139 unsigned int key_size; 173 unsigned int key_size;
174 unsigned int key_parts;
140 u8 key[0]; 175 u8 key[0];
141}; 176};
142 177
@@ -148,6 +183,20 @@ static struct kmem_cache *_crypt_io_pool;
148 183
149static void clone_init(struct dm_crypt_io *, struct bio *); 184static void clone_init(struct dm_crypt_io *, struct bio *);
150static void kcryptd_queue_crypt(struct dm_crypt_io *io); 185static void kcryptd_queue_crypt(struct dm_crypt_io *io);
186static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
187
188static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
189{
190 return this_cpu_ptr(cc->cpu);
191}
192
193/*
194 * Use this to access cipher attributes that are the same for each CPU.
195 */
196static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
197{
198 return __this_cpu_ptr(cc->cpu)->tfms[0];
199}
151 200
152/* 201/*
153 * Different IV generation algorithms: 202 * Different IV generation algorithms:
@@ -168,23 +217,38 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
168 * null: the initial vector is always zero. Provides compatibility with 217 * null: the initial vector is always zero. Provides compatibility with
169 * obsolete loop_fish2 devices. Do not use for new devices. 218 * obsolete loop_fish2 devices. Do not use for new devices.
170 * 219 *
220 * lmk: Compatible implementation of the block chaining mode used
221 * by the Loop-AES block device encryption system
222 * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
223 * It operates on full 512 byte sectors and uses CBC
224 * with an IV derived from the sector number, the data and
225 * optionally extra IV seed.
226 * This means that after decryption the first block
227 * of sector must be tweaked according to decrypted data.
228 * Loop-AES can use three encryption schemes:
229 * version 1: is plain aes-cbc mode
230 * version 2: uses 64 multikey scheme with lmk IV generator
231 * version 3: the same as version 2 with additional IV seed
232 * (it uses 65 keys, last key is used as IV seed)
233 *
171 * plumb: unimplemented, see: 234 * plumb: unimplemented, see:
172 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 235 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
173 */ 236 */
174 237
175static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) 238static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
239 struct dm_crypt_request *dmreq)
176{ 240{
177 memset(iv, 0, cc->iv_size); 241 memset(iv, 0, cc->iv_size);
178 *(u32 *)iv = cpu_to_le32(sector & 0xffffffff); 242 *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
179 243
180 return 0; 244 return 0;
181} 245}
182 246
183static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, 247static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
184 sector_t sector) 248 struct dm_crypt_request *dmreq)
185{ 249{
186 memset(iv, 0, cc->iv_size); 250 memset(iv, 0, cc->iv_size);
187 *(u64 *)iv = cpu_to_le64(sector); 251 *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
188 252
189 return 0; 253 return 0;
190} 254}
@@ -195,7 +259,8 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
195 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 259 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
196 struct hash_desc desc; 260 struct hash_desc desc;
197 struct scatterlist sg; 261 struct scatterlist sg;
198 int err; 262 struct crypto_cipher *essiv_tfm;
263 int err, cpu;
199 264
200 sg_init_one(&sg, cc->key, cc->key_size); 265 sg_init_one(&sg, cc->key, cc->key_size);
201 desc.tfm = essiv->hash_tfm; 266 desc.tfm = essiv->hash_tfm;
@@ -205,8 +270,16 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
205 if (err) 270 if (err)
206 return err; 271 return err;
207 272
208 return crypto_cipher_setkey(essiv->tfm, essiv->salt, 273 for_each_possible_cpu(cpu) {
274 essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
275
276 err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
209 crypto_hash_digestsize(essiv->hash_tfm)); 277 crypto_hash_digestsize(essiv->hash_tfm));
278 if (err)
279 return err;
280 }
281
282 return 0;
210} 283}
211 284
212/* Wipe salt and reset key derived from volume key */ 285/* Wipe salt and reset key derived from volume key */
@@ -214,24 +287,76 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
214{ 287{
215 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 288 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
216 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); 289 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
290 struct crypto_cipher *essiv_tfm;
291 int cpu, r, err = 0;
217 292
218 memset(essiv->salt, 0, salt_size); 293 memset(essiv->salt, 0, salt_size);
219 294
220 return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size); 295 for_each_possible_cpu(cpu) {
296 essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
297 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
298 if (r)
299 err = r;
300 }
301
302 return err;
303}
304
305/* Set up per cpu cipher state */
306static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
307 struct dm_target *ti,
308 u8 *salt, unsigned saltsize)
309{
310 struct crypto_cipher *essiv_tfm;
311 int err;
312
313 /* Setup the essiv_tfm with the given salt */
314 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
315 if (IS_ERR(essiv_tfm)) {
316 ti->error = "Error allocating crypto tfm for ESSIV";
317 return essiv_tfm;
318 }
319
320 if (crypto_cipher_blocksize(essiv_tfm) !=
321 crypto_ablkcipher_ivsize(any_tfm(cc))) {
322 ti->error = "Block size of ESSIV cipher does "
323 "not match IV size of block cipher";
324 crypto_free_cipher(essiv_tfm);
325 return ERR_PTR(-EINVAL);
326 }
327
328 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
329 if (err) {
330 ti->error = "Failed to set key for ESSIV cipher";
331 crypto_free_cipher(essiv_tfm);
332 return ERR_PTR(err);
333 }
334
335 return essiv_tfm;
221} 336}
222 337
223static void crypt_iv_essiv_dtr(struct crypt_config *cc) 338static void crypt_iv_essiv_dtr(struct crypt_config *cc)
224{ 339{
340 int cpu;
341 struct crypt_cpu *cpu_cc;
342 struct crypto_cipher *essiv_tfm;
225 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; 343 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
226 344
227 crypto_free_cipher(essiv->tfm);
228 essiv->tfm = NULL;
229
230 crypto_free_hash(essiv->hash_tfm); 345 crypto_free_hash(essiv->hash_tfm);
231 essiv->hash_tfm = NULL; 346 essiv->hash_tfm = NULL;
232 347
233 kzfree(essiv->salt); 348 kzfree(essiv->salt);
234 essiv->salt = NULL; 349 essiv->salt = NULL;
350
351 for_each_possible_cpu(cpu) {
352 cpu_cc = per_cpu_ptr(cc->cpu, cpu);
353 essiv_tfm = cpu_cc->iv_private;
354
355 if (essiv_tfm)
356 crypto_free_cipher(essiv_tfm);
357
358 cpu_cc->iv_private = NULL;
359 }
235} 360}
236 361
237static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, 362static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -240,7 +365,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
240 struct crypto_cipher *essiv_tfm = NULL; 365 struct crypto_cipher *essiv_tfm = NULL;
241 struct crypto_hash *hash_tfm = NULL; 366 struct crypto_hash *hash_tfm = NULL;
242 u8 *salt = NULL; 367 u8 *salt = NULL;
243 int err; 368 int err, cpu;
244 369
245 if (!opts) { 370 if (!opts) {
246 ti->error = "Digest algorithm missing for ESSIV mode"; 371 ti->error = "Digest algorithm missing for ESSIV mode";
@@ -262,48 +387,44 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
262 goto bad; 387 goto bad;
263 } 388 }
264 389
265 /* Allocate essiv_tfm */
266 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
267 if (IS_ERR(essiv_tfm)) {
268 ti->error = "Error allocating crypto tfm for ESSIV";
269 err = PTR_ERR(essiv_tfm);
270 goto bad;
271 }
272 if (crypto_cipher_blocksize(essiv_tfm) !=
273 crypto_ablkcipher_ivsize(cc->tfm)) {
274 ti->error = "Block size of ESSIV cipher does "
275 "not match IV size of block cipher";
276 err = -EINVAL;
277 goto bad;
278 }
279
280 cc->iv_gen_private.essiv.salt = salt; 390 cc->iv_gen_private.essiv.salt = salt;
281 cc->iv_gen_private.essiv.tfm = essiv_tfm;
282 cc->iv_gen_private.essiv.hash_tfm = hash_tfm; 391 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
283 392
393 for_each_possible_cpu(cpu) {
394 essiv_tfm = setup_essiv_cpu(cc, ti, salt,
395 crypto_hash_digestsize(hash_tfm));
396 if (IS_ERR(essiv_tfm)) {
397 crypt_iv_essiv_dtr(cc);
398 return PTR_ERR(essiv_tfm);
399 }
400 per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
401 }
402
284 return 0; 403 return 0;
285 404
286bad: 405bad:
287 if (essiv_tfm && !IS_ERR(essiv_tfm))
288 crypto_free_cipher(essiv_tfm);
289 if (hash_tfm && !IS_ERR(hash_tfm)) 406 if (hash_tfm && !IS_ERR(hash_tfm))
290 crypto_free_hash(hash_tfm); 407 crypto_free_hash(hash_tfm);
291 kfree(salt); 408 kfree(salt);
292 return err; 409 return err;
293} 410}
294 411
295static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) 412static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
413 struct dm_crypt_request *dmreq)
296{ 414{
415 struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
416
297 memset(iv, 0, cc->iv_size); 417 memset(iv, 0, cc->iv_size);
298 *(u64 *)iv = cpu_to_le64(sector); 418 *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
299 crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv); 419 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
420
300 return 0; 421 return 0;
301} 422}
302 423
303static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, 424static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
304 const char *opts) 425 const char *opts)
305{ 426{
306 unsigned bs = crypto_ablkcipher_blocksize(cc->tfm); 427 unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
307 int log = ilog2(bs); 428 int log = ilog2(bs);
308 429
309 /* we need to calculate how far we must shift the sector count 430 /* we need to calculate how far we must shift the sector count
@@ -328,25 +449,177 @@ static void crypt_iv_benbi_dtr(struct crypt_config *cc)
328{ 449{
329} 450}
330 451
331static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) 452static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
453 struct dm_crypt_request *dmreq)
332{ 454{
333 __be64 val; 455 __be64 val;
334 456
335 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ 457 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
336 458
337 val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1); 459 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
338 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); 460 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
339 461
340 return 0; 462 return 0;
341} 463}
342 464
343static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector) 465static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
466 struct dm_crypt_request *dmreq)
344{ 467{
345 memset(iv, 0, cc->iv_size); 468 memset(iv, 0, cc->iv_size);
346 469
347 return 0; 470 return 0;
348} 471}
349 472
473static void crypt_iv_lmk_dtr(struct crypt_config *cc)
474{
475 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
476
477 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
478 crypto_free_shash(lmk->hash_tfm);
479 lmk->hash_tfm = NULL;
480
481 kzfree(lmk->seed);
482 lmk->seed = NULL;
483}
484
485static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
486 const char *opts)
487{
488 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
489
490 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
491 if (IS_ERR(lmk->hash_tfm)) {
492 ti->error = "Error initializing LMK hash";
493 return PTR_ERR(lmk->hash_tfm);
494 }
495
496 /* No seed in LMK version 2 */
497 if (cc->key_parts == cc->tfms_count) {
498 lmk->seed = NULL;
499 return 0;
500 }
501
502 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
503 if (!lmk->seed) {
504 crypt_iv_lmk_dtr(cc);
505 ti->error = "Error kmallocing seed storage in LMK";
506 return -ENOMEM;
507 }
508
509 return 0;
510}
511
512static int crypt_iv_lmk_init(struct crypt_config *cc)
513{
514 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
515 int subkey_size = cc->key_size / cc->key_parts;
516
517 /* LMK seed is on the position of LMK_KEYS + 1 key */
518 if (lmk->seed)
519 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
520 crypto_shash_digestsize(lmk->hash_tfm));
521
522 return 0;
523}
524
525static int crypt_iv_lmk_wipe(struct crypt_config *cc)
526{
527 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
528
529 if (lmk->seed)
530 memset(lmk->seed, 0, LMK_SEED_SIZE);
531
532 return 0;
533}
534
535static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
536 struct dm_crypt_request *dmreq,
537 u8 *data)
538{
539 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
540 struct {
541 struct shash_desc desc;
542 char ctx[crypto_shash_descsize(lmk->hash_tfm)];
543 } sdesc;
544 struct md5_state md5state;
545 u32 buf[4];
546 int i, r;
547
548 sdesc.desc.tfm = lmk->hash_tfm;
549 sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
550
551 r = crypto_shash_init(&sdesc.desc);
552 if (r)
553 return r;
554
555 if (lmk->seed) {
556 r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
557 if (r)
558 return r;
559 }
560
561 /* Sector is always 512B, block size 16, add data of blocks 1-31 */
562 r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
563 if (r)
564 return r;
565
566 /* Sector is cropped to 56 bits here */
567 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
568 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
569 buf[2] = cpu_to_le32(4024);
570 buf[3] = 0;
571 r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
572 if (r)
573 return r;
574
575 /* No MD5 padding here */
576 r = crypto_shash_export(&sdesc.desc, &md5state);
577 if (r)
578 return r;
579
580 for (i = 0; i < MD5_HASH_WORDS; i++)
581 __cpu_to_le32s(&md5state.hash[i]);
582 memcpy(iv, &md5state.hash, cc->iv_size);
583
584 return 0;
585}
586
587static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
588 struct dm_crypt_request *dmreq)
589{
590 u8 *src;
591 int r = 0;
592
593 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
594 src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
595 r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
596 kunmap_atomic(src, KM_USER0);
597 } else
598 memset(iv, 0, cc->iv_size);
599
600 return r;
601}
602
603static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
604 struct dm_crypt_request *dmreq)
605{
606 u8 *dst;
607 int r;
608
609 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
610 return 0;
611
612 dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
613 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
614
615 /* Tweak the first block of plaintext sector */
616 if (!r)
617 crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
618
619 kunmap_atomic(dst, KM_USER0);
620 return r;
621}
622
350static struct crypt_iv_operations crypt_iv_plain_ops = { 623static struct crypt_iv_operations crypt_iv_plain_ops = {
351 .generator = crypt_iv_plain_gen 624 .generator = crypt_iv_plain_gen
352}; 625};
@@ -373,6 +646,15 @@ static struct crypt_iv_operations crypt_iv_null_ops = {
373 .generator = crypt_iv_null_gen 646 .generator = crypt_iv_null_gen
374}; 647};
375 648
649static struct crypt_iv_operations crypt_iv_lmk_ops = {
650 .ctr = crypt_iv_lmk_ctr,
651 .dtr = crypt_iv_lmk_dtr,
652 .init = crypt_iv_lmk_init,
653 .wipe = crypt_iv_lmk_wipe,
654 .generator = crypt_iv_lmk_gen,
655 .post = crypt_iv_lmk_post
656};
657
376static void crypt_convert_init(struct crypt_config *cc, 658static void crypt_convert_init(struct crypt_config *cc,
377 struct convert_context *ctx, 659 struct convert_context *ctx,
378 struct bio *bio_out, struct bio *bio_in, 660 struct bio *bio_out, struct bio *bio_in,
@@ -400,6 +682,13 @@ static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
400 return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start); 682 return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
401} 683}
402 684
685static u8 *iv_of_dmreq(struct crypt_config *cc,
686 struct dm_crypt_request *dmreq)
687{
688 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
689 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
690}
691
403static int crypt_convert_block(struct crypt_config *cc, 692static int crypt_convert_block(struct crypt_config *cc,
404 struct convert_context *ctx, 693 struct convert_context *ctx,
405 struct ablkcipher_request *req) 694 struct ablkcipher_request *req)
@@ -411,9 +700,9 @@ static int crypt_convert_block(struct crypt_config *cc,
411 int r = 0; 700 int r = 0;
412 701
413 dmreq = dmreq_of_req(cc, req); 702 dmreq = dmreq_of_req(cc, req);
414 iv = (u8 *)ALIGN((unsigned long)(dmreq + 1), 703 iv = iv_of_dmreq(cc, dmreq);
415 crypto_ablkcipher_alignmask(cc->tfm) + 1);
416 704
705 dmreq->iv_sector = ctx->sector;
417 dmreq->ctx = ctx; 706 dmreq->ctx = ctx;
418 sg_init_table(&dmreq->sg_in, 1); 707 sg_init_table(&dmreq->sg_in, 1);
419 sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, 708 sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -436,7 +725,7 @@ static int crypt_convert_block(struct crypt_config *cc,
436 } 725 }
437 726
438 if (cc->iv_gen_ops) { 727 if (cc->iv_gen_ops) {
439 r = cc->iv_gen_ops->generator(cc, iv, ctx->sector); 728 r = cc->iv_gen_ops->generator(cc, iv, dmreq);
440 if (r < 0) 729 if (r < 0)
441 return r; 730 return r;
442 } 731 }
@@ -449,21 +738,28 @@ static int crypt_convert_block(struct crypt_config *cc,
449 else 738 else
450 r = crypto_ablkcipher_decrypt(req); 739 r = crypto_ablkcipher_decrypt(req);
451 740
741 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
742 r = cc->iv_gen_ops->post(cc, iv, dmreq);
743
452 return r; 744 return r;
453} 745}
454 746
455static void kcryptd_async_done(struct crypto_async_request *async_req, 747static void kcryptd_async_done(struct crypto_async_request *async_req,
456 int error); 748 int error);
749
457static void crypt_alloc_req(struct crypt_config *cc, 750static void crypt_alloc_req(struct crypt_config *cc,
458 struct convert_context *ctx) 751 struct convert_context *ctx)
459{ 752{
460 if (!cc->req) 753 struct crypt_cpu *this_cc = this_crypt_config(cc);
461 cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); 754 unsigned key_index = ctx->sector & (cc->tfms_count - 1);
462 ablkcipher_request_set_tfm(cc->req, cc->tfm); 755
463 ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | 756 if (!this_cc->req)
464 CRYPTO_TFM_REQ_MAY_SLEEP, 757 this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
465 kcryptd_async_done, 758
466 dmreq_of_req(cc, cc->req)); 759 ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
760 ablkcipher_request_set_callback(this_cc->req,
761 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
762 kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
467} 763}
468 764
469/* 765/*
@@ -472,6 +768,7 @@ static void crypt_alloc_req(struct crypt_config *cc,
472static int crypt_convert(struct crypt_config *cc, 768static int crypt_convert(struct crypt_config *cc,
473 struct convert_context *ctx) 769 struct convert_context *ctx)
474{ 770{
771 struct crypt_cpu *this_cc = this_crypt_config(cc);
475 int r; 772 int r;
476 773
477 atomic_set(&ctx->pending, 1); 774 atomic_set(&ctx->pending, 1);
@@ -483,7 +780,7 @@ static int crypt_convert(struct crypt_config *cc,
483 780
484 atomic_inc(&ctx->pending); 781 atomic_inc(&ctx->pending);
485 782
486 r = crypt_convert_block(cc, ctx, cc->req); 783 r = crypt_convert_block(cc, ctx, this_cc->req);
487 784
488 switch (r) { 785 switch (r) {
489 /* async */ 786 /* async */
@@ -492,7 +789,7 @@ static int crypt_convert(struct crypt_config *cc,
492 INIT_COMPLETION(ctx->restart); 789 INIT_COMPLETION(ctx->restart);
493 /* fall through*/ 790 /* fall through*/
494 case -EINPROGRESS: 791 case -EINPROGRESS:
495 cc->req = NULL; 792 this_cc->req = NULL;
496 ctx->sector++; 793 ctx->sector++;
497 continue; 794 continue;
498 795
@@ -651,6 +948,9 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
651 * They must be separated as otherwise the final stages could be 948 * They must be separated as otherwise the final stages could be
652 * starved by new requests which can block in the first stages due 949 * starved by new requests which can block in the first stages due
653 * to memory allocation. 950 * to memory allocation.
951 *
952 * The work is done per CPU global for all dm-crypt instances.
953 * They should not depend on each other and do not block.
654 */ 954 */
655static void crypt_endio(struct bio *clone, int error) 955static void crypt_endio(struct bio *clone, int error)
656{ 956{
@@ -691,26 +991,30 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
691 clone->bi_destructor = dm_crypt_bio_destructor; 991 clone->bi_destructor = dm_crypt_bio_destructor;
692} 992}
693 993
694static void kcryptd_io_read(struct dm_crypt_io *io) 994static void kcryptd_unplug(struct crypt_config *cc)
995{
996 blk_unplug(bdev_get_queue(cc->dev->bdev));
997}
998
999static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
695{ 1000{
696 struct crypt_config *cc = io->target->private; 1001 struct crypt_config *cc = io->target->private;
697 struct bio *base_bio = io->base_bio; 1002 struct bio *base_bio = io->base_bio;
698 struct bio *clone; 1003 struct bio *clone;
699 1004
700 crypt_inc_pending(io);
701
702 /* 1005 /*
703 * The block layer might modify the bvec array, so always 1006 * The block layer might modify the bvec array, so always
704 * copy the required bvecs because we need the original 1007 * copy the required bvecs because we need the original
705 * one in order to decrypt the whole bio data *afterwards*. 1008 * one in order to decrypt the whole bio data *afterwards*.
706 */ 1009 */
707 clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); 1010 clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
708 if (unlikely(!clone)) { 1011 if (!clone) {
709 io->error = -ENOMEM; 1012 kcryptd_unplug(cc);
710 crypt_dec_pending(io); 1013 return 1;
711 return;
712 } 1014 }
713 1015
1016 crypt_inc_pending(io);
1017
714 clone_init(io, clone); 1018 clone_init(io, clone);
715 clone->bi_idx = 0; 1019 clone->bi_idx = 0;
716 clone->bi_vcnt = bio_segments(base_bio); 1020 clone->bi_vcnt = bio_segments(base_bio);
@@ -720,6 +1024,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
720 sizeof(struct bio_vec) * clone->bi_vcnt); 1024 sizeof(struct bio_vec) * clone->bi_vcnt);
721 1025
722 generic_make_request(clone); 1026 generic_make_request(clone);
1027 return 0;
723} 1028}
724 1029
725static void kcryptd_io_write(struct dm_crypt_io *io) 1030static void kcryptd_io_write(struct dm_crypt_io *io)
@@ -732,9 +1037,12 @@ static void kcryptd_io(struct work_struct *work)
732{ 1037{
733 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); 1038 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
734 1039
735 if (bio_data_dir(io->base_bio) == READ) 1040 if (bio_data_dir(io->base_bio) == READ) {
736 kcryptd_io_read(io); 1041 crypt_inc_pending(io);
737 else 1042 if (kcryptd_io_read(io, GFP_NOIO))
1043 io->error = -ENOMEM;
1044 crypt_dec_pending(io);
1045 } else
738 kcryptd_io_write(io); 1046 kcryptd_io_write(io);
739} 1047}
740 1048
@@ -901,6 +1209,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
901 return; 1209 return;
902 } 1210 }
903 1211
1212 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1213 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
1214
904 mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); 1215 mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
905 1216
906 if (!atomic_dec_and_test(&ctx->pending)) 1217 if (!atomic_dec_and_test(&ctx->pending))
@@ -971,34 +1282,84 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
971 } 1282 }
972} 1283}
973 1284
974static int crypt_set_key(struct crypt_config *cc, char *key) 1285static void crypt_free_tfms(struct crypt_config *cc, int cpu)
975{ 1286{
976 unsigned key_size = strlen(key) >> 1; 1287 struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
1288 unsigned i;
977 1289
978 if (cc->key_size && cc->key_size != key_size) 1290 for (i = 0; i < cc->tfms_count; i++)
1291 if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
1292 crypto_free_ablkcipher(cpu_cc->tfms[i]);
1293 cpu_cc->tfms[i] = NULL;
1294 }
1295}
1296
1297static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
1298{
1299 struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
1300 unsigned i;
1301 int err;
1302
1303 for (i = 0; i < cc->tfms_count; i++) {
1304 cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
1305 if (IS_ERR(cpu_cc->tfms[i])) {
1306 err = PTR_ERR(cpu_cc->tfms[i]);
1307 crypt_free_tfms(cc, cpu);
1308 return err;
1309 }
1310 }
1311
1312 return 0;
1313}
1314
1315static int crypt_setkey_allcpus(struct crypt_config *cc)
1316{
1317 unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
1318 int cpu, err = 0, i, r;
1319
1320 for_each_possible_cpu(cpu) {
1321 for (i = 0; i < cc->tfms_count; i++) {
1322 r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
1323 cc->key + (i * subkey_size), subkey_size);
1324 if (r)
1325 err = r;
1326 }
1327 }
1328
1329 return err;
1330}
1331
1332static int crypt_set_key(struct crypt_config *cc, char *key)
1333{
1334 /* The key size may not be changed. */
1335 if (cc->key_size != (strlen(key) >> 1))
979 return -EINVAL; 1336 return -EINVAL;
980 1337
981 cc->key_size = key_size; /* initial settings */ 1338 /* Hyphen (which gives a key_size of zero) means there is no key. */
1339 if (!cc->key_size && strcmp(key, "-"))
1340 return -EINVAL;
982 1341
983 if ((!key_size && strcmp(key, "-")) || 1342 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
984 (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
985 return -EINVAL; 1343 return -EINVAL;
986 1344
987 set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 1345 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
988 1346
989 return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); 1347 return crypt_setkey_allcpus(cc);
990} 1348}
991 1349
992static int crypt_wipe_key(struct crypt_config *cc) 1350static int crypt_wipe_key(struct crypt_config *cc)
993{ 1351{
994 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); 1352 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
995 memset(&cc->key, 0, cc->key_size * sizeof(u8)); 1353 memset(&cc->key, 0, cc->key_size * sizeof(u8));
996 return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); 1354
1355 return crypt_setkey_allcpus(cc);
997} 1356}
998 1357
999static void crypt_dtr(struct dm_target *ti) 1358static void crypt_dtr(struct dm_target *ti)
1000{ 1359{
1001 struct crypt_config *cc = ti->private; 1360 struct crypt_config *cc = ti->private;
1361 struct crypt_cpu *cpu_cc;
1362 int cpu;
1002 1363
1003 ti->private = NULL; 1364 ti->private = NULL;
1004 1365
@@ -1010,6 +1371,14 @@ static void crypt_dtr(struct dm_target *ti)
1010 if (cc->crypt_queue) 1371 if (cc->crypt_queue)
1011 destroy_workqueue(cc->crypt_queue); 1372 destroy_workqueue(cc->crypt_queue);
1012 1373
1374 if (cc->cpu)
1375 for_each_possible_cpu(cpu) {
1376 cpu_cc = per_cpu_ptr(cc->cpu, cpu);
1377 if (cpu_cc->req)
1378 mempool_free(cpu_cc->req, cc->req_pool);
1379 crypt_free_tfms(cc, cpu);
1380 }
1381
1013 if (cc->bs) 1382 if (cc->bs)
1014 bioset_free(cc->bs); 1383 bioset_free(cc->bs);
1015 1384
@@ -1023,14 +1392,14 @@ static void crypt_dtr(struct dm_target *ti)
1023 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 1392 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1024 cc->iv_gen_ops->dtr(cc); 1393 cc->iv_gen_ops->dtr(cc);
1025 1394
1026 if (cc->tfm && !IS_ERR(cc->tfm))
1027 crypto_free_ablkcipher(cc->tfm);
1028
1029 if (cc->dev) 1395 if (cc->dev)
1030 dm_put_device(ti, cc->dev); 1396 dm_put_device(ti, cc->dev);
1031 1397
1398 if (cc->cpu)
1399 free_percpu(cc->cpu);
1400
1032 kzfree(cc->cipher); 1401 kzfree(cc->cipher);
1033 kzfree(cc->cipher_mode); 1402 kzfree(cc->cipher_string);
1034 1403
1035 /* Must zero key material before freeing */ 1404 /* Must zero key material before freeing */
1036 kzfree(cc); 1405 kzfree(cc);
@@ -1040,9 +1409,9 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1040 char *cipher_in, char *key) 1409 char *cipher_in, char *key)
1041{ 1410{
1042 struct crypt_config *cc = ti->private; 1411 struct crypt_config *cc = ti->private;
1043 char *tmp, *cipher, *chainmode, *ivmode, *ivopts; 1412 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
1044 char *cipher_api = NULL; 1413 char *cipher_api = NULL;
1045 int ret = -EINVAL; 1414 int cpu, ret = -EINVAL;
1046 1415
1047 /* Convert to crypto api definition? */ 1416 /* Convert to crypto api definition? */
1048 if (strchr(cipher_in, '(')) { 1417 if (strchr(cipher_in, '(')) {
@@ -1050,23 +1419,31 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1050 return -EINVAL; 1419 return -EINVAL;
1051 } 1420 }
1052 1421
1422 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
1423 if (!cc->cipher_string)
1424 goto bad_mem;
1425
1053 /* 1426 /*
1054 * Legacy dm-crypt cipher specification 1427 * Legacy dm-crypt cipher specification
1055 * cipher-mode-iv:ivopts 1428 * cipher[:keycount]-mode-iv:ivopts
1056 */ 1429 */
1057 tmp = cipher_in; 1430 tmp = cipher_in;
1058 cipher = strsep(&tmp, "-"); 1431 keycount = strsep(&tmp, "-");
1432 cipher = strsep(&keycount, ":");
1433
1434 if (!keycount)
1435 cc->tfms_count = 1;
1436 else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
1437 !is_power_of_2(cc->tfms_count)) {
1438 ti->error = "Bad cipher key count specification";
1439 return -EINVAL;
1440 }
1441 cc->key_parts = cc->tfms_count;
1059 1442
1060 cc->cipher = kstrdup(cipher, GFP_KERNEL); 1443 cc->cipher = kstrdup(cipher, GFP_KERNEL);
1061 if (!cc->cipher) 1444 if (!cc->cipher)
1062 goto bad_mem; 1445 goto bad_mem;
1063 1446
1064 if (tmp) {
1065 cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
1066 if (!cc->cipher_mode)
1067 goto bad_mem;
1068 }
1069
1070 chainmode = strsep(&tmp, "-"); 1447 chainmode = strsep(&tmp, "-");
1071 ivopts = strsep(&tmp, "-"); 1448 ivopts = strsep(&tmp, "-");
1072 ivmode = strsep(&ivopts, ":"); 1449 ivmode = strsep(&ivopts, ":");
@@ -1074,10 +1451,19 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1074 if (tmp) 1451 if (tmp)
1075 DMWARN("Ignoring unexpected additional cipher options"); 1452 DMWARN("Ignoring unexpected additional cipher options");
1076 1453
1077 /* Compatibility mode for old dm-crypt mappings */ 1454 cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
1455 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
1456 __alignof__(struct crypt_cpu));
1457 if (!cc->cpu) {
1458 ti->error = "Cannot allocate per cpu state";
1459 goto bad_mem;
1460 }
1461
1462 /*
1463 * For compatibility with the original dm-crypt mapping format, if
1464 * only the cipher name is supplied, use cbc-plain.
1465 */
1078 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) { 1466 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
1079 kfree(cc->cipher_mode);
1080 cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
1081 chainmode = "cbc"; 1467 chainmode = "cbc";
1082 ivmode = "plain"; 1468 ivmode = "plain";
1083 } 1469 }
@@ -1099,11 +1485,12 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1099 } 1485 }
1100 1486
1101 /* Allocate cipher */ 1487 /* Allocate cipher */
1102 cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0); 1488 for_each_possible_cpu(cpu) {
1103 if (IS_ERR(cc->tfm)) { 1489 ret = crypt_alloc_tfms(cc, cpu, cipher_api);
1104 ret = PTR_ERR(cc->tfm); 1490 if (ret < 0) {
1105 ti->error = "Error allocating crypto tfm"; 1491 ti->error = "Error allocating crypto tfm";
1106 goto bad; 1492 goto bad;
1493 }
1107 } 1494 }
1108 1495
1109 /* Initialize and set key */ 1496 /* Initialize and set key */
@@ -1114,7 +1501,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1114 } 1501 }
1115 1502
1116 /* Initialize IV */ 1503 /* Initialize IV */
1117 cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm); 1504 cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
1118 if (cc->iv_size) 1505 if (cc->iv_size)
1119 /* at least a 64 bit sector number should fit in our buffer */ 1506 /* at least a 64 bit sector number should fit in our buffer */
1120 cc->iv_size = max(cc->iv_size, 1507 cc->iv_size = max(cc->iv_size,
@@ -1137,7 +1524,15 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1137 cc->iv_gen_ops = &crypt_iv_benbi_ops; 1524 cc->iv_gen_ops = &crypt_iv_benbi_ops;
1138 else if (strcmp(ivmode, "null") == 0) 1525 else if (strcmp(ivmode, "null") == 0)
1139 cc->iv_gen_ops = &crypt_iv_null_ops; 1526 cc->iv_gen_ops = &crypt_iv_null_ops;
1140 else { 1527 else if (strcmp(ivmode, "lmk") == 0) {
1528 cc->iv_gen_ops = &crypt_iv_lmk_ops;
1529 /* Version 2 and 3 is recognised according
1530 * to length of provided multi-key string.
1531 * If present (version 3), last key is used as IV seed.
1532 */
1533 if (cc->key_size % cc->key_parts)
1534 cc->key_parts++;
1535 } else {
1141 ret = -EINVAL; 1536 ret = -EINVAL;
1142 ti->error = "Invalid IV mode"; 1537 ti->error = "Invalid IV mode";
1143 goto bad; 1538 goto bad;
@@ -1194,6 +1589,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1194 ti->error = "Cannot allocate encryption context"; 1589 ti->error = "Cannot allocate encryption context";
1195 return -ENOMEM; 1590 return -ENOMEM;
1196 } 1591 }
1592 cc->key_size = key_size;
1197 1593
1198 ti->private = cc; 1594 ti->private = cc;
1199 ret = crypt_ctr_cipher(ti, argv[0], argv[1]); 1595 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
@@ -1208,9 +1604,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1208 } 1604 }
1209 1605
1210 cc->dmreq_start = sizeof(struct ablkcipher_request); 1606 cc->dmreq_start = sizeof(struct ablkcipher_request);
1211 cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm); 1607 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
1212 cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); 1608 cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
1213 cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) & 1609 cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
1214 ~(crypto_tfm_ctx_alignment() - 1); 1610 ~(crypto_tfm_ctx_alignment() - 1);
1215 1611
1216 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + 1612 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
@@ -1219,7 +1615,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1219 ti->error = "Cannot allocate crypt request mempool"; 1615 ti->error = "Cannot allocate crypt request mempool";
1220 goto bad; 1616 goto bad;
1221 } 1617 }
1222 cc->req = NULL;
1223 1618
1224 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); 1619 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
1225 if (!cc->page_pool) { 1620 if (!cc->page_pool) {
@@ -1252,13 +1647,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1252 cc->start = tmpll; 1647 cc->start = tmpll;
1253 1648
1254 ret = -ENOMEM; 1649 ret = -ENOMEM;
1255 cc->io_queue = create_singlethread_workqueue("kcryptd_io"); 1650 cc->io_queue = alloc_workqueue("kcryptd_io",
1651 WQ_NON_REENTRANT|
1652 WQ_MEM_RECLAIM,
1653 1);
1256 if (!cc->io_queue) { 1654 if (!cc->io_queue) {
1257 ti->error = "Couldn't create kcryptd io queue"; 1655 ti->error = "Couldn't create kcryptd io queue";
1258 goto bad; 1656 goto bad;
1259 } 1657 }
1260 1658
1261 cc->crypt_queue = create_singlethread_workqueue("kcryptd"); 1659 cc->crypt_queue = alloc_workqueue("kcryptd",
1660 WQ_NON_REENTRANT|
1661 WQ_CPU_INTENSIVE|
1662 WQ_MEM_RECLAIM,
1663 1);
1262 if (!cc->crypt_queue) { 1664 if (!cc->crypt_queue) {
1263 ti->error = "Couldn't create kcryptd queue"; 1665 ti->error = "Couldn't create kcryptd queue";
1264 goto bad; 1666 goto bad;
@@ -1286,9 +1688,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
1286 1688
1287 io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector)); 1689 io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
1288 1690
1289 if (bio_data_dir(io->base_bio) == READ) 1691 if (bio_data_dir(io->base_bio) == READ) {
1290 kcryptd_queue_io(io); 1692 if (kcryptd_io_read(io, GFP_NOWAIT))
1291 else 1693 kcryptd_queue_io(io);
1694 } else
1292 kcryptd_queue_crypt(io); 1695 kcryptd_queue_crypt(io);
1293 1696
1294 return DM_MAPIO_SUBMITTED; 1697 return DM_MAPIO_SUBMITTED;
@@ -1306,10 +1709,7 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
1306 break; 1709 break;
1307 1710
1308 case STATUSTYPE_TABLE: 1711 case STATUSTYPE_TABLE:
1309 if (cc->cipher_mode) 1712 DMEMIT("%s ", cc->cipher_string);
1310 DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
1311 else
1312 DMEMIT("%s ", cc->cipher);
1313 1713
1314 if (cc->key_size > 0) { 1714 if (cc->key_size > 0) {
1315 if ((maxlen - sz) < ((cc->key_size << 1) + 1)) 1715 if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -1421,7 +1821,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
1421 1821
1422static struct target_type crypt_target = { 1822static struct target_type crypt_target = {
1423 .name = "crypt", 1823 .name = "crypt",
1424 .version = {1, 7, 0}, 1824 .version = {1, 10, 0},
1425 .module = THIS_MODULE, 1825 .module = THIS_MODULE,
1426 .ctr = crypt_ctr, 1826 .ctr = crypt_ctr,
1427 .dtr = crypt_dtr, 1827 .dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index baa11912cc94..f18375dcedd9 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -352,7 +352,7 @@ static int __init dm_delay_init(void)
352{ 352{
353 int r = -ENOMEM; 353 int r = -ENOMEM;
354 354
355 kdelayd_wq = create_workqueue("kdelayd"); 355 kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
356 if (!kdelayd_wq) { 356 if (!kdelayd_wq) {
357 DMERR("Couldn't start kdelayd"); 357 DMERR("Couldn't start kdelayd");
358 goto bad_queue; 358 goto bad_queue;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4b54618b4159..6d12775a1061 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -295,19 +295,55 @@ retry:
295 DMWARN("remove_all left %d open device(s)", dev_skipped); 295 DMWARN("remove_all left %d open device(s)", dev_skipped);
296} 296}
297 297
298/*
299 * Set the uuid of a hash_cell that isn't already set.
300 */
301static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
302{
303 mutex_lock(&dm_hash_cells_mutex);
304 hc->uuid = new_uuid;
305 mutex_unlock(&dm_hash_cells_mutex);
306
307 list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
308}
309
310/*
311 * Changes the name of a hash_cell and returns the old name for
312 * the caller to free.
313 */
314static char *__change_cell_name(struct hash_cell *hc, char *new_name)
315{
316 char *old_name;
317
318 /*
319 * Rename and move the name cell.
320 */
321 list_del(&hc->name_list);
322 old_name = hc->name;
323
324 mutex_lock(&dm_hash_cells_mutex);
325 hc->name = new_name;
326 mutex_unlock(&dm_hash_cells_mutex);
327
328 list_add(&hc->name_list, _name_buckets + hash_str(new_name));
329
330 return old_name;
331}
332
298static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, 333static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
299 const char *new) 334 const char *new)
300{ 335{
301 char *new_name, *old_name; 336 char *new_data, *old_name = NULL;
302 struct hash_cell *hc; 337 struct hash_cell *hc;
303 struct dm_table *table; 338 struct dm_table *table;
304 struct mapped_device *md; 339 struct mapped_device *md;
340 unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
305 341
306 /* 342 /*
307 * duplicate new. 343 * duplicate new.
308 */ 344 */
309 new_name = kstrdup(new, GFP_KERNEL); 345 new_data = kstrdup(new, GFP_KERNEL);
310 if (!new_name) 346 if (!new_data)
311 return ERR_PTR(-ENOMEM); 347 return ERR_PTR(-ENOMEM);
312 348
313 down_write(&_hash_lock); 349 down_write(&_hash_lock);
@@ -315,13 +351,19 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
315 /* 351 /*
316 * Is new free ? 352 * Is new free ?
317 */ 353 */
318 hc = __get_name_cell(new); 354 if (change_uuid)
355 hc = __get_uuid_cell(new);
356 else
357 hc = __get_name_cell(new);
358
319 if (hc) { 359 if (hc) {
320 DMWARN("asked to rename to an already-existing name %s -> %s", 360 DMWARN("Unable to change %s on mapped device %s to one that "
361 "already exists: %s",
362 change_uuid ? "uuid" : "name",
321 param->name, new); 363 param->name, new);
322 dm_put(hc->md); 364 dm_put(hc->md);
323 up_write(&_hash_lock); 365 up_write(&_hash_lock);
324 kfree(new_name); 366 kfree(new_data);
325 return ERR_PTR(-EBUSY); 367 return ERR_PTR(-EBUSY);
326 } 368 }
327 369
@@ -330,22 +372,30 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
330 */ 372 */
331 hc = __get_name_cell(param->name); 373 hc = __get_name_cell(param->name);
332 if (!hc) { 374 if (!hc) {
333 DMWARN("asked to rename a non-existent device %s -> %s", 375 DMWARN("Unable to rename non-existent device, %s to %s%s",
334 param->name, new); 376 param->name, change_uuid ? "uuid " : "", new);
335 up_write(&_hash_lock); 377 up_write(&_hash_lock);
336 kfree(new_name); 378 kfree(new_data);
337 return ERR_PTR(-ENXIO); 379 return ERR_PTR(-ENXIO);
338 } 380 }
339 381
340 /* 382 /*
341 * rename and move the name cell. 383 * Does this device already have a uuid?
342 */ 384 */
343 list_del(&hc->name_list); 385 if (change_uuid && hc->uuid) {
344 old_name = hc->name; 386 DMWARN("Unable to change uuid of mapped device %s to %s "
345 mutex_lock(&dm_hash_cells_mutex); 387 "because uuid is already set to %s",
346 hc->name = new_name; 388 param->name, new, hc->uuid);
347 mutex_unlock(&dm_hash_cells_mutex); 389 dm_put(hc->md);
348 list_add(&hc->name_list, _name_buckets + hash_str(new_name)); 390 up_write(&_hash_lock);
391 kfree(new_data);
392 return ERR_PTR(-EINVAL);
393 }
394
395 if (change_uuid)
396 __set_cell_uuid(hc, new_data);
397 else
398 old_name = __change_cell_name(hc, new_data);
349 399
350 /* 400 /*
351 * Wake up any dm event waiters. 401 * Wake up any dm event waiters.
@@ -729,7 +779,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
729 hc = __find_device_hash_cell(param); 779 hc = __find_device_hash_cell(param);
730 780
731 if (!hc) { 781 if (!hc) {
732 DMWARN("device doesn't appear to be in the dev hash table."); 782 DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
733 up_write(&_hash_lock); 783 up_write(&_hash_lock);
734 return -ENXIO; 784 return -ENXIO;
735 } 785 }
@@ -741,7 +791,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
741 */ 791 */
742 r = dm_lock_for_deletion(md); 792 r = dm_lock_for_deletion(md);
743 if (r) { 793 if (r) {
744 DMWARN("unable to remove open device %s", hc->name); 794 DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
745 up_write(&_hash_lock); 795 up_write(&_hash_lock);
746 dm_put(md); 796 dm_put(md);
747 return r; 797 return r;
@@ -774,21 +824,24 @@ static int invalid_str(char *str, void *end)
774static int dev_rename(struct dm_ioctl *param, size_t param_size) 824static int dev_rename(struct dm_ioctl *param, size_t param_size)
775{ 825{
776 int r; 826 int r;
777 char *new_name = (char *) param + param->data_start; 827 char *new_data = (char *) param + param->data_start;
778 struct mapped_device *md; 828 struct mapped_device *md;
829 unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
779 830
780 if (new_name < param->data || 831 if (new_data < param->data ||
781 invalid_str(new_name, (void *) param + param_size) || 832 invalid_str(new_data, (void *) param + param_size) ||
782 strlen(new_name) > DM_NAME_LEN - 1) { 833 strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
783 DMWARN("Invalid new logical volume name supplied."); 834 DMWARN("Invalid new mapped device name or uuid string supplied.");
784 return -EINVAL; 835 return -EINVAL;
785 } 836 }
786 837
787 r = check_name(new_name); 838 if (!change_uuid) {
788 if (r) 839 r = check_name(new_data);
789 return r; 840 if (r)
841 return r;
842 }
790 843
791 md = dm_hash_rename(param, new_name); 844 md = dm_hash_rename(param, new_data);
792 if (IS_ERR(md)) 845 if (IS_ERR(md))
793 return PTR_ERR(md); 846 return PTR_ERR(md);
794 847
@@ -885,7 +938,7 @@ static int do_resume(struct dm_ioctl *param)
885 938
886 hc = __find_device_hash_cell(param); 939 hc = __find_device_hash_cell(param);
887 if (!hc) { 940 if (!hc) {
888 DMWARN("device doesn't appear to be in the dev hash table."); 941 DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
889 up_write(&_hash_lock); 942 up_write(&_hash_lock);
890 return -ENXIO; 943 return -ENXIO;
891 } 944 }
@@ -1212,7 +1265,7 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
1212 1265
1213 hc = __find_device_hash_cell(param); 1266 hc = __find_device_hash_cell(param);
1214 if (!hc) { 1267 if (!hc) {
1215 DMWARN("device doesn't appear to be in the dev hash table."); 1268 DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
1216 up_write(&_hash_lock); 1269 up_write(&_hash_lock);
1217 return -ENXIO; 1270 return -ENXIO;
1218 } 1271 }
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d8587bac5682..924f5f0084c2 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,6 +37,13 @@ struct dm_kcopyd_client {
37 unsigned int nr_pages; 37 unsigned int nr_pages;
38 unsigned int nr_free_pages; 38 unsigned int nr_free_pages;
39 39
40 /*
41 * Block devices to unplug.
42 * Non-NULL pointer means that a block device has some pending requests
43 * and needs to be unplugged.
44 */
45 struct block_device *unplug[2];
46
40 struct dm_io_client *io_client; 47 struct dm_io_client *io_client;
41 48
42 wait_queue_head_t destroyq; 49 wait_queue_head_t destroyq;
@@ -308,6 +315,31 @@ static int run_complete_job(struct kcopyd_job *job)
308 return 0; 315 return 0;
309} 316}
310 317
318/*
319 * Unplug the block device at the specified index.
320 */
321static void unplug(struct dm_kcopyd_client *kc, int rw)
322{
323 if (kc->unplug[rw] != NULL) {
324 blk_unplug(bdev_get_queue(kc->unplug[rw]));
325 kc->unplug[rw] = NULL;
326 }
327}
328
329/*
330 * Prepare block device unplug. If there's another device
331 * to be unplugged at the same array index, we unplug that
332 * device first.
333 */
334static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
335 struct block_device *bdev)
336{
337 if (likely(kc->unplug[rw] == bdev))
338 return;
339 unplug(kc, rw);
340 kc->unplug[rw] = bdev;
341}
342
311static void complete_io(unsigned long error, void *context) 343static void complete_io(unsigned long error, void *context)
312{ 344{
313 struct kcopyd_job *job = (struct kcopyd_job *) context; 345 struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -345,7 +377,7 @@ static int run_io_job(struct kcopyd_job *job)
345{ 377{
346 int r; 378 int r;
347 struct dm_io_request io_req = { 379 struct dm_io_request io_req = {
348 .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG, 380 .bi_rw = job->rw,
349 .mem.type = DM_IO_PAGE_LIST, 381 .mem.type = DM_IO_PAGE_LIST,
350 .mem.ptr.pl = job->pages, 382 .mem.ptr.pl = job->pages,
351 .mem.offset = job->offset, 383 .mem.offset = job->offset,
@@ -354,10 +386,16 @@ static int run_io_job(struct kcopyd_job *job)
354 .client = job->kc->io_client, 386 .client = job->kc->io_client,
355 }; 387 };
356 388
357 if (job->rw == READ) 389 if (job->rw == READ) {
358 r = dm_io(&io_req, 1, &job->source, NULL); 390 r = dm_io(&io_req, 1, &job->source, NULL);
359 else 391 prepare_unplug(job->kc, READ, job->source.bdev);
392 } else {
393 if (job->num_dests > 1)
394 io_req.bi_rw |= REQ_UNPLUG;
360 r = dm_io(&io_req, job->num_dests, job->dests, NULL); 395 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
396 if (!(io_req.bi_rw & REQ_UNPLUG))
397 prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
398 }
361 399
362 return r; 400 return r;
363} 401}
@@ -435,10 +473,18 @@ static void do_work(struct work_struct *work)
435 * Pages jobs when successful will jump onto the io jobs 473 * Pages jobs when successful will jump onto the io jobs
436 * list. io jobs call wake when they complete and it all 474 * list. io jobs call wake when they complete and it all
437 * starts again. 475 * starts again.
476 *
477 * Note that io_jobs add block devices to the unplug array,
478 * this array is cleared with "unplug" calls. It is thus
479 * forbidden to run complete_jobs after io_jobs and before
480 * unplug because the block device could be destroyed in
481 * job completion callback.
438 */ 482 */
439 process_jobs(&kc->complete_jobs, kc, run_complete_job); 483 process_jobs(&kc->complete_jobs, kc, run_complete_job);
440 process_jobs(&kc->pages_jobs, kc, run_pages_job); 484 process_jobs(&kc->pages_jobs, kc, run_pages_job);
441 process_jobs(&kc->io_jobs, kc, run_io_job); 485 process_jobs(&kc->io_jobs, kc, run_io_job);
486 unplug(kc, READ);
487 unplug(kc, WRITE);
442} 488}
443 489
444/* 490/*
@@ -619,12 +665,15 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
619 INIT_LIST_HEAD(&kc->io_jobs); 665 INIT_LIST_HEAD(&kc->io_jobs);
620 INIT_LIST_HEAD(&kc->pages_jobs); 666 INIT_LIST_HEAD(&kc->pages_jobs);
621 667
668 memset(kc->unplug, 0, sizeof(kc->unplug));
669
622 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); 670 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
623 if (!kc->job_pool) 671 if (!kc->job_pool)
624 goto bad_slab; 672 goto bad_slab;
625 673
626 INIT_WORK(&kc->kcopyd_work, do_work); 674 INIT_WORK(&kc->kcopyd_work, do_work);
627 kc->kcopyd_wq = create_singlethread_workqueue("kcopyd"); 675 kc->kcopyd_wq = alloc_workqueue("kcopyd",
676 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
628 if (!kc->kcopyd_wq) 677 if (!kc->kcopyd_wq)
629 goto bad_workqueue; 678 goto bad_workqueue;
630 679
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 1ed0094f064b..aa2e0c374ab3 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -12,12 +12,22 @@
12 12
13#include "dm-log-userspace-transfer.h" 13#include "dm-log-userspace-transfer.h"
14 14
15#define DM_LOG_USERSPACE_VSN "1.1.0"
16
15struct flush_entry { 17struct flush_entry {
16 int type; 18 int type;
17 region_t region; 19 region_t region;
18 struct list_head list; 20 struct list_head list;
19}; 21};
20 22
23/*
24 * This limit on the number of mark and clear request is, to a degree,
25 * arbitrary. However, there is some basis for the choice in the limits
26 * imposed on the size of data payload by dm-log-userspace-transfer.c:
27 * dm_consult_userspace().
28 */
29#define MAX_FLUSH_GROUP_COUNT 32
30
21struct log_c { 31struct log_c {
22 struct dm_target *ti; 32 struct dm_target *ti;
23 uint32_t region_size; 33 uint32_t region_size;
@@ -37,8 +47,15 @@ struct log_c {
37 */ 47 */
38 uint64_t in_sync_hint; 48 uint64_t in_sync_hint;
39 49
50 /*
51 * Mark and clear requests are held until a flush is issued
52 * so that we can group, and thereby limit, the amount of
53 * network traffic between kernel and userspace. The 'flush_lock'
54 * is used to protect these lists.
55 */
40 spinlock_t flush_lock; 56 spinlock_t flush_lock;
41 struct list_head flush_list; /* only for clear and mark requests */ 57 struct list_head mark_list;
58 struct list_head clear_list;
42}; 59};
43 60
44static mempool_t *flush_entry_pool; 61static mempool_t *flush_entry_pool;
@@ -169,7 +186,8 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
169 186
170 strncpy(lc->uuid, argv[0], DM_UUID_LEN); 187 strncpy(lc->uuid, argv[0], DM_UUID_LEN);
171 spin_lock_init(&lc->flush_lock); 188 spin_lock_init(&lc->flush_lock);
172 INIT_LIST_HEAD(&lc->flush_list); 189 INIT_LIST_HEAD(&lc->mark_list);
190 INIT_LIST_HEAD(&lc->clear_list);
173 191
174 str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str); 192 str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
175 if (str_size < 0) { 193 if (str_size < 0) {
@@ -181,8 +199,11 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
181 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR, 199 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
182 ctr_str, str_size, NULL, NULL); 200 ctr_str, str_size, NULL, NULL);
183 201
184 if (r == -ESRCH) { 202 if (r < 0) {
185 DMERR("Userspace log server not found"); 203 if (r == -ESRCH)
204 DMERR("Userspace log server not found");
205 else
206 DMERR("Userspace log server failed to create log");
186 goto out; 207 goto out;
187 } 208 }
188 209
@@ -214,10 +235,9 @@ out:
214 235
215static void userspace_dtr(struct dm_dirty_log *log) 236static void userspace_dtr(struct dm_dirty_log *log)
216{ 237{
217 int r;
218 struct log_c *lc = log->context; 238 struct log_c *lc = log->context;
219 239
220 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR, 240 (void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
221 NULL, 0, 241 NULL, 0,
222 NULL, NULL); 242 NULL, NULL);
223 243
@@ -338,6 +358,71 @@ static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
338 return (r) ? 0 : (int)in_sync; 358 return (r) ? 0 : (int)in_sync;
339} 359}
340 360
361static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
362{
363 int r = 0;
364 struct flush_entry *fe;
365
366 list_for_each_entry(fe, flush_list, list) {
367 r = userspace_do_request(lc, lc->uuid, fe->type,
368 (char *)&fe->region,
369 sizeof(fe->region),
370 NULL, NULL);
371 if (r)
372 break;
373 }
374
375 return r;
376}
377
378static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
379{
380 int r = 0;
381 int count;
382 uint32_t type = 0;
383 struct flush_entry *fe, *tmp_fe;
384 LIST_HEAD(tmp_list);
385 uint64_t group[MAX_FLUSH_GROUP_COUNT];
386
387 /*
388 * Group process the requests
389 */
390 while (!list_empty(flush_list)) {
391 count = 0;
392
393 list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
394 group[count] = fe->region;
395 count++;
396
397 list_del(&fe->list);
398 list_add(&fe->list, &tmp_list);
399
400 type = fe->type;
401 if (count >= MAX_FLUSH_GROUP_COUNT)
402 break;
403 }
404
405 r = userspace_do_request(lc, lc->uuid, type,
406 (char *)(group),
407 count * sizeof(uint64_t),
408 NULL, NULL);
409 if (r) {
410 /* Group send failed. Attempt one-by-one. */
411 list_splice_init(&tmp_list, flush_list);
412 r = flush_one_by_one(lc, flush_list);
413 break;
414 }
415 }
416
417 /*
418 * Must collect flush_entrys that were successfully processed
419 * as a group so that they will be free'd by the caller.
420 */
421 list_splice_init(&tmp_list, flush_list);
422
423 return r;
424}
425
341/* 426/*
342 * userspace_flush 427 * userspace_flush
343 * 428 *
@@ -360,31 +445,25 @@ static int userspace_flush(struct dm_dirty_log *log)
360 int r = 0; 445 int r = 0;
361 unsigned long flags; 446 unsigned long flags;
362 struct log_c *lc = log->context; 447 struct log_c *lc = log->context;
363 LIST_HEAD(flush_list); 448 LIST_HEAD(mark_list);
449 LIST_HEAD(clear_list);
364 struct flush_entry *fe, *tmp_fe; 450 struct flush_entry *fe, *tmp_fe;
365 451
366 spin_lock_irqsave(&lc->flush_lock, flags); 452 spin_lock_irqsave(&lc->flush_lock, flags);
367 list_splice_init(&lc->flush_list, &flush_list); 453 list_splice_init(&lc->mark_list, &mark_list);
454 list_splice_init(&lc->clear_list, &clear_list);
368 spin_unlock_irqrestore(&lc->flush_lock, flags); 455 spin_unlock_irqrestore(&lc->flush_lock, flags);
369 456
370 if (list_empty(&flush_list)) 457 if (list_empty(&mark_list) && list_empty(&clear_list))
371 return 0; 458 return 0;
372 459
373 /* 460 r = flush_by_group(lc, &mark_list);
374 * FIXME: Count up requests, group request types, 461 if (r)
375 * allocate memory to stick all requests in and 462 goto fail;
376 * send to server in one go. Failing the allocation,
377 * do it one by one.
378 */
379 463
380 list_for_each_entry(fe, &flush_list, list) { 464 r = flush_by_group(lc, &clear_list);
381 r = userspace_do_request(lc, lc->uuid, fe->type, 465 if (r)
382 (char *)&fe->region, 466 goto fail;
383 sizeof(fe->region),
384 NULL, NULL);
385 if (r)
386 goto fail;
387 }
388 467
389 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH, 468 r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
390 NULL, 0, NULL, NULL); 469 NULL, 0, NULL, NULL);
@@ -395,7 +474,11 @@ fail:
395 * Calling code will receive an error and will know that 474 * Calling code will receive an error and will know that
396 * the log facility has failed. 475 * the log facility has failed.
397 */ 476 */
398 list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) { 477 list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
478 list_del(&fe->list);
479 mempool_free(fe, flush_entry_pool);
480 }
481 list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
399 list_del(&fe->list); 482 list_del(&fe->list);
400 mempool_free(fe, flush_entry_pool); 483 mempool_free(fe, flush_entry_pool);
401 } 484 }
@@ -425,7 +508,7 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
425 spin_lock_irqsave(&lc->flush_lock, flags); 508 spin_lock_irqsave(&lc->flush_lock, flags);
426 fe->type = DM_ULOG_MARK_REGION; 509 fe->type = DM_ULOG_MARK_REGION;
427 fe->region = region; 510 fe->region = region;
428 list_add(&fe->list, &lc->flush_list); 511 list_add(&fe->list, &lc->mark_list);
429 spin_unlock_irqrestore(&lc->flush_lock, flags); 512 spin_unlock_irqrestore(&lc->flush_lock, flags);
430 513
431 return; 514 return;
@@ -462,7 +545,7 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
462 spin_lock_irqsave(&lc->flush_lock, flags); 545 spin_lock_irqsave(&lc->flush_lock, flags);
463 fe->type = DM_ULOG_CLEAR_REGION; 546 fe->type = DM_ULOG_CLEAR_REGION;
464 fe->region = region; 547 fe->region = region;
465 list_add(&fe->list, &lc->flush_list); 548 list_add(&fe->list, &lc->clear_list);
466 spin_unlock_irqrestore(&lc->flush_lock, flags); 549 spin_unlock_irqrestore(&lc->flush_lock, flags);
467 550
468 return; 551 return;
@@ -684,7 +767,7 @@ static int __init userspace_dirty_log_init(void)
684 return r; 767 return r;
685 } 768 }
686 769
687 DMINFO("version 1.0.0 loaded"); 770 DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
688 return 0; 771 return 0;
689} 772}
690 773
@@ -694,7 +777,7 @@ static void __exit userspace_dirty_log_exit(void)
694 dm_ulog_tfr_exit(); 777 dm_ulog_tfr_exit();
695 mempool_destroy(flush_entry_pool); 778 mempool_destroy(flush_entry_pool);
696 779
697 DMINFO("version 1.0.0 unloaded"); 780 DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
698 return; 781 return;
699} 782}
700 783
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 075cbcf8a9f5..049eaf12aaab 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -198,6 +198,7 @@ resend:
198 198
199 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg)); 199 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
200 memcpy(tfr->uuid, uuid, DM_UUID_LEN); 200 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
201 tfr->version = DM_ULOG_REQUEST_VERSION;
201 tfr->luid = luid; 202 tfr->luid = luid;
202 tfr->seq = dm_ulog_seq++; 203 tfr->seq = dm_ulog_seq++;
203 204
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 33420e68d153..6951536ea29c 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -455,7 +455,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
455 r = PTR_ERR(lc->io_req.client); 455 r = PTR_ERR(lc->io_req.client);
456 DMWARN("couldn't allocate disk io client"); 456 DMWARN("couldn't allocate disk io client");
457 kfree(lc); 457 kfree(lc);
458 return -ENOMEM; 458 return r;
459 } 459 }
460 460
461 lc->disk_header = vmalloc(buf_size); 461 lc->disk_header = vmalloc(buf_size);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 487ecda90ad4..b82d28819e2a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -23,6 +23,8 @@
23 23
24#define DM_MSG_PREFIX "multipath" 24#define DM_MSG_PREFIX "multipath"
25#define MESG_STR(x) x, sizeof(x) 25#define MESG_STR(x) x, sizeof(x)
26#define DM_PG_INIT_DELAY_MSECS 2000
27#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
26 28
27/* Path properties */ 29/* Path properties */
28struct pgpath { 30struct pgpath {
@@ -33,8 +35,7 @@ struct pgpath {
33 unsigned fail_count; /* Cumulative failure count */ 35 unsigned fail_count; /* Cumulative failure count */
34 36
35 struct dm_path path; 37 struct dm_path path;
36 struct work_struct deactivate_path; 38 struct delayed_work activate_path;
37 struct work_struct activate_path;
38}; 39};
39 40
40#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) 41#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -65,11 +66,15 @@ struct multipath {
65 66
66 const char *hw_handler_name; 67 const char *hw_handler_name;
67 char *hw_handler_params; 68 char *hw_handler_params;
69
68 unsigned nr_priority_groups; 70 unsigned nr_priority_groups;
69 struct list_head priority_groups; 71 struct list_head priority_groups;
72
73 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
74
70 unsigned pg_init_required; /* pg_init needs calling? */ 75 unsigned pg_init_required; /* pg_init needs calling? */
71 unsigned pg_init_in_progress; /* Only one pg_init allowed at once */ 76 unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
72 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */ 77 unsigned pg_init_delay_retry; /* Delay pg_init retry? */
73 78
74 unsigned nr_valid_paths; /* Total number of usable paths */ 79 unsigned nr_valid_paths; /* Total number of usable paths */
75 struct pgpath *current_pgpath; 80 struct pgpath *current_pgpath;
@@ -82,6 +87,7 @@ struct multipath {
82 unsigned saved_queue_if_no_path;/* Saved state during suspension */ 87 unsigned saved_queue_if_no_path;/* Saved state during suspension */
83 unsigned pg_init_retries; /* Number of times to retry pg_init */ 88 unsigned pg_init_retries; /* Number of times to retry pg_init */
84 unsigned pg_init_count; /* Number of times pg_init called */ 89 unsigned pg_init_count; /* Number of times pg_init called */
90 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
85 91
86 struct work_struct process_queued_ios; 92 struct work_struct process_queued_ios;
87 struct list_head queued_ios; 93 struct list_head queued_ios;
@@ -116,7 +122,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
116static void process_queued_ios(struct work_struct *work); 122static void process_queued_ios(struct work_struct *work);
117static void trigger_event(struct work_struct *work); 123static void trigger_event(struct work_struct *work);
118static void activate_path(struct work_struct *work); 124static void activate_path(struct work_struct *work);
119static void deactivate_path(struct work_struct *work);
120 125
121 126
122/*----------------------------------------------- 127/*-----------------------------------------------
@@ -129,8 +134,7 @@ static struct pgpath *alloc_pgpath(void)
129 134
130 if (pgpath) { 135 if (pgpath) {
131 pgpath->is_active = 1; 136 pgpath->is_active = 1;
132 INIT_WORK(&pgpath->deactivate_path, deactivate_path); 137 INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
133 INIT_WORK(&pgpath->activate_path, activate_path);
134 } 138 }
135 139
136 return pgpath; 140 return pgpath;
@@ -141,14 +145,6 @@ static void free_pgpath(struct pgpath *pgpath)
141 kfree(pgpath); 145 kfree(pgpath);
142} 146}
143 147
144static void deactivate_path(struct work_struct *work)
145{
146 struct pgpath *pgpath =
147 container_of(work, struct pgpath, deactivate_path);
148
149 blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
150}
151
152static struct priority_group *alloc_priority_group(void) 148static struct priority_group *alloc_priority_group(void)
153{ 149{
154 struct priority_group *pg; 150 struct priority_group *pg;
@@ -199,6 +195,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
199 INIT_LIST_HEAD(&m->queued_ios); 195 INIT_LIST_HEAD(&m->queued_ios);
200 spin_lock_init(&m->lock); 196 spin_lock_init(&m->lock);
201 m->queue_io = 1; 197 m->queue_io = 1;
198 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
202 INIT_WORK(&m->process_queued_ios, process_queued_ios); 199 INIT_WORK(&m->process_queued_ios, process_queued_ios);
203 INIT_WORK(&m->trigger_event, trigger_event); 200 INIT_WORK(&m->trigger_event, trigger_event);
204 init_waitqueue_head(&m->pg_init_wait); 201 init_waitqueue_head(&m->pg_init_wait);
@@ -238,14 +235,19 @@ static void free_multipath(struct multipath *m)
238static void __pg_init_all_paths(struct multipath *m) 235static void __pg_init_all_paths(struct multipath *m)
239{ 236{
240 struct pgpath *pgpath; 237 struct pgpath *pgpath;
238 unsigned long pg_init_delay = 0;
241 239
242 m->pg_init_count++; 240 m->pg_init_count++;
243 m->pg_init_required = 0; 241 m->pg_init_required = 0;
242 if (m->pg_init_delay_retry)
243 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
244 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
244 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { 245 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
245 /* Skip failed paths */ 246 /* Skip failed paths */
246 if (!pgpath->is_active) 247 if (!pgpath->is_active)
247 continue; 248 continue;
248 if (queue_work(kmpath_handlerd, &pgpath->activate_path)) 249 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
250 pg_init_delay))
249 m->pg_init_in_progress++; 251 m->pg_init_in_progress++;
250 } 252 }
251} 253}
@@ -793,8 +795,9 @@ static int parse_features(struct arg_set *as, struct multipath *m)
793 const char *param_name; 795 const char *param_name;
794 796
795 static struct param _params[] = { 797 static struct param _params[] = {
796 {0, 3, "invalid number of feature args"}, 798 {0, 5, "invalid number of feature args"},
797 {1, 50, "pg_init_retries must be between 1 and 50"}, 799 {1, 50, "pg_init_retries must be between 1 and 50"},
800 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
798 }; 801 };
799 802
800 r = read_param(_params, shift(as), &argc, &ti->error); 803 r = read_param(_params, shift(as), &argc, &ti->error);
@@ -821,6 +824,14 @@ static int parse_features(struct arg_set *as, struct multipath *m)
821 continue; 824 continue;
822 } 825 }
823 826
827 if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
828 (argc >= 1)) {
829 r = read_param(_params + 2, shift(as),
830 &m->pg_init_delay_msecs, &ti->error);
831 argc--;
832 continue;
833 }
834
824 ti->error = "Unrecognised multipath feature request"; 835 ti->error = "Unrecognised multipath feature request";
825 r = -EINVAL; 836 r = -EINVAL;
826 } while (argc && !r); 837 } while (argc && !r);
@@ -931,7 +942,7 @@ static void flush_multipath_work(struct multipath *m)
931 flush_workqueue(kmpath_handlerd); 942 flush_workqueue(kmpath_handlerd);
932 multipath_wait_for_pg_init_completion(m); 943 multipath_wait_for_pg_init_completion(m);
933 flush_workqueue(kmultipathd); 944 flush_workqueue(kmultipathd);
934 flush_scheduled_work(); 945 flush_work_sync(&m->trigger_event);
935} 946}
936 947
937static void multipath_dtr(struct dm_target *ti) 948static void multipath_dtr(struct dm_target *ti)
@@ -995,7 +1006,6 @@ static int fail_path(struct pgpath *pgpath)
995 pgpath->path.dev->name, m->nr_valid_paths); 1006 pgpath->path.dev->name, m->nr_valid_paths);
996 1007
997 schedule_work(&m->trigger_event); 1008 schedule_work(&m->trigger_event);
998 queue_work(kmultipathd, &pgpath->deactivate_path);
999 1009
1000out: 1010out:
1001 spin_unlock_irqrestore(&m->lock, flags); 1011 spin_unlock_irqrestore(&m->lock, flags);
@@ -1034,7 +1044,7 @@ static int reinstate_path(struct pgpath *pgpath)
1034 m->current_pgpath = NULL; 1044 m->current_pgpath = NULL;
1035 queue_work(kmultipathd, &m->process_queued_ios); 1045 queue_work(kmultipathd, &m->process_queued_ios);
1036 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { 1046 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1037 if (queue_work(kmpath_handlerd, &pgpath->activate_path)) 1047 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1038 m->pg_init_in_progress++; 1048 m->pg_init_in_progress++;
1039 } 1049 }
1040 1050
@@ -1169,6 +1179,7 @@ static void pg_init_done(void *data, int errors)
1169 struct priority_group *pg = pgpath->pg; 1179 struct priority_group *pg = pgpath->pg;
1170 struct multipath *m = pg->m; 1180 struct multipath *m = pg->m;
1171 unsigned long flags; 1181 unsigned long flags;
1182 unsigned delay_retry = 0;
1172 1183
1173 /* device or driver problems */ 1184 /* device or driver problems */
1174 switch (errors) { 1185 switch (errors) {
@@ -1193,8 +1204,9 @@ static void pg_init_done(void *data, int errors)
1193 */ 1204 */
1194 bypass_pg(m, pg, 1); 1205 bypass_pg(m, pg, 1);
1195 break; 1206 break;
1196 /* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
1197 case SCSI_DH_RETRY: 1207 case SCSI_DH_RETRY:
1208 /* Wait before retrying. */
1209 delay_retry = 1;
1198 case SCSI_DH_IMM_RETRY: 1210 case SCSI_DH_IMM_RETRY:
1199 case SCSI_DH_RES_TEMP_UNAVAIL: 1211 case SCSI_DH_RES_TEMP_UNAVAIL:
1200 if (pg_init_limit_reached(m, pgpath)) 1212 if (pg_init_limit_reached(m, pgpath))
@@ -1227,6 +1239,7 @@ static void pg_init_done(void *data, int errors)
1227 if (!m->pg_init_required) 1239 if (!m->pg_init_required)
1228 m->queue_io = 0; 1240 m->queue_io = 0;
1229 1241
1242 m->pg_init_delay_retry = delay_retry;
1230 queue_work(kmultipathd, &m->process_queued_ios); 1243 queue_work(kmultipathd, &m->process_queued_ios);
1231 1244
1232 /* 1245 /*
@@ -1241,7 +1254,7 @@ out:
1241static void activate_path(struct work_struct *work) 1254static void activate_path(struct work_struct *work)
1242{ 1255{
1243 struct pgpath *pgpath = 1256 struct pgpath *pgpath =
1244 container_of(work, struct pgpath, activate_path); 1257 container_of(work, struct pgpath, activate_path.work);
1245 1258
1246 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), 1259 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1247 pg_init_done, pgpath); 1260 pg_init_done, pgpath);
@@ -1382,11 +1395,14 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
1382 DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); 1395 DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
1383 else { 1396 else {
1384 DMEMIT("%u ", m->queue_if_no_path + 1397 DMEMIT("%u ", m->queue_if_no_path +
1385 (m->pg_init_retries > 0) * 2); 1398 (m->pg_init_retries > 0) * 2 +
1399 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
1386 if (m->queue_if_no_path) 1400 if (m->queue_if_no_path)
1387 DMEMIT("queue_if_no_path "); 1401 DMEMIT("queue_if_no_path ");
1388 if (m->pg_init_retries) 1402 if (m->pg_init_retries)
1389 DMEMIT("pg_init_retries %u ", m->pg_init_retries); 1403 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1404 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1405 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1390 } 1406 }
1391 1407
1392 if (!m->hw_handler_name || type == STATUSTYPE_INFO) 1408 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
@@ -1655,7 +1671,7 @@ out:
1655 *---------------------------------------------------------------*/ 1671 *---------------------------------------------------------------*/
1656static struct target_type multipath_target = { 1672static struct target_type multipath_target = {
1657 .name = "multipath", 1673 .name = "multipath",
1658 .version = {1, 1, 1}, 1674 .version = {1, 2, 0},
1659 .module = THIS_MODULE, 1675 .module = THIS_MODULE,
1660 .ctr = multipath_ctr, 1676 .ctr = multipath_ctr,
1661 .dtr = multipath_dtr, 1677 .dtr = multipath_dtr,
@@ -1687,7 +1703,7 @@ static int __init dm_multipath_init(void)
1687 return -EINVAL; 1703 return -EINVAL;
1688 } 1704 }
1689 1705
1690 kmultipathd = create_workqueue("kmpathd"); 1706 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1691 if (!kmultipathd) { 1707 if (!kmultipathd) {
1692 DMERR("failed to create workqueue kmpathd"); 1708 DMERR("failed to create workqueue kmpathd");
1693 dm_unregister_target(&multipath_target); 1709 dm_unregister_target(&multipath_target);
@@ -1701,7 +1717,8 @@ static int __init dm_multipath_init(void)
1701 * old workqueue would also create a bottleneck in the 1717 * old workqueue would also create a bottleneck in the
1702 * path of the storage hardware device activation. 1718 * path of the storage hardware device activation.
1703 */ 1719 */
1704 kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd"); 1720 kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1721 WQ_MEM_RECLAIM);
1705 if (!kmpath_handlerd) { 1722 if (!kmpath_handlerd) {
1706 DMERR("failed to create workqueue kmpath_handlerd"); 1723 DMERR("failed to create workqueue kmpath_handlerd");
1707 destroy_workqueue(kmultipathd); 1724 destroy_workqueue(kmultipathd);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
new file mode 100644
index 000000000000..b9e1e15ef11c
--- /dev/null
+++ b/drivers/md/dm-raid.c
@@ -0,0 +1,697 @@
1/*
2 * Copyright (C) 2010-2011 Neil Brown
3 * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include <linux/slab.h>
9
10#include "md.h"
11#include "raid5.h"
12#include "dm.h"
13#include "bitmap.h"
14
15#define DM_MSG_PREFIX "raid"
16
17/*
18 * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then
19 * make it so the flag doesn't set anything.
20 */
21#ifndef MD_SYNC_STATE_FORCED
22#define MD_SYNC_STATE_FORCED 0
23#endif
24
25struct raid_dev {
26 /*
27 * Two DM devices, one to hold metadata and one to hold the
28 * actual data/parity. The reason for this is to not confuse
29 * ti->len and give more flexibility in altering size and
30 * characteristics.
31 *
32 * While it is possible for this device to be associated
33 * with a different physical device than the data_dev, it
34 * is intended for it to be the same.
35 * |--------- Physical Device ---------|
36 * |- meta_dev -|------ data_dev ------|
37 */
38 struct dm_dev *meta_dev;
39 struct dm_dev *data_dev;
40 struct mdk_rdev_s rdev;
41};
42
43/*
44 * Flags for rs->print_flags field.
45 */
46#define DMPF_DAEMON_SLEEP 0x1
47#define DMPF_MAX_WRITE_BEHIND 0x2
48#define DMPF_SYNC 0x4
49#define DMPF_NOSYNC 0x8
50#define DMPF_STRIPE_CACHE 0x10
51#define DMPF_MIN_RECOVERY_RATE 0x20
52#define DMPF_MAX_RECOVERY_RATE 0x40
53
54struct raid_set {
55 struct dm_target *ti;
56
57 uint64_t print_flags;
58
59 struct mddev_s md;
60 struct raid_type *raid_type;
61 struct dm_target_callbacks callbacks;
62
63 struct raid_dev dev[0];
64};
65
66/* Supported raid types and properties. */
67static struct raid_type {
68 const char *name; /* RAID algorithm. */
69 const char *descr; /* Descriptor text for logging. */
70 const unsigned parity_devs; /* # of parity devices. */
71 const unsigned minimal_devs; /* minimal # of devices in set. */
72 const unsigned level; /* RAID level. */
73 const unsigned algorithm; /* RAID algorithm. */
74} raid_types[] = {
75 {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
76 {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
77 {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
78 {"raid5_ls", "RAID5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
79 {"raid5_rs", "RAID5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
80 {"raid6_zr", "RAID6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
81 {"raid6_nr", "RAID6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
82 {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
83};
84
85static struct raid_type *get_raid_type(char *name)
86{
87 int i;
88
89 for (i = 0; i < ARRAY_SIZE(raid_types); i++)
90 if (!strcmp(raid_types[i].name, name))
91 return &raid_types[i];
92
93 return NULL;
94}
95
96static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
97{
98 unsigned i;
99 struct raid_set *rs;
100 sector_t sectors_per_dev;
101
102 if (raid_devs <= raid_type->parity_devs) {
103 ti->error = "Insufficient number of devices";
104 return ERR_PTR(-EINVAL);
105 }
106
107 sectors_per_dev = ti->len;
108 if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
109 ti->error = "Target length not divisible by number of data devices";
110 return ERR_PTR(-EINVAL);
111 }
112
113 rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
114 if (!rs) {
115 ti->error = "Cannot allocate raid context";
116 return ERR_PTR(-ENOMEM);
117 }
118
119 mddev_init(&rs->md);
120
121 rs->ti = ti;
122 rs->raid_type = raid_type;
123 rs->md.raid_disks = raid_devs;
124 rs->md.level = raid_type->level;
125 rs->md.new_level = rs->md.level;
126 rs->md.dev_sectors = sectors_per_dev;
127 rs->md.layout = raid_type->algorithm;
128 rs->md.new_layout = rs->md.layout;
129 rs->md.delta_disks = 0;
130 rs->md.recovery_cp = 0;
131
132 for (i = 0; i < raid_devs; i++)
133 md_rdev_init(&rs->dev[i].rdev);
134
135 /*
136 * Remaining items to be initialized by further RAID params:
137 * rs->md.persistent
138 * rs->md.external
139 * rs->md.chunk_sectors
140 * rs->md.new_chunk_sectors
141 */
142
143 return rs;
144}
145
146static void context_free(struct raid_set *rs)
147{
148 int i;
149
150 for (i = 0; i < rs->md.raid_disks; i++)
151 if (rs->dev[i].data_dev)
152 dm_put_device(rs->ti, rs->dev[i].data_dev);
153
154 kfree(rs);
155}
156
157/*
158 * For every device we have two words
159 * <meta_dev>: meta device name or '-' if missing
160 * <data_dev>: data device name or '-' if missing
161 *
162 * This code parses those words.
163 */
164static int dev_parms(struct raid_set *rs, char **argv)
165{
166 int i;
167 int rebuild = 0;
168 int metadata_available = 0;
169 int ret = 0;
170
171 for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
172 rs->dev[i].rdev.raid_disk = i;
173
174 rs->dev[i].meta_dev = NULL;
175 rs->dev[i].data_dev = NULL;
176
177 /*
178 * There are no offsets, since there is a separate device
179 * for data and metadata.
180 */
181 rs->dev[i].rdev.data_offset = 0;
182 rs->dev[i].rdev.mddev = &rs->md;
183
184 if (strcmp(argv[0], "-")) {
185 rs->ti->error = "Metadata devices not supported";
186 return -EINVAL;
187 }
188
189 if (!strcmp(argv[1], "-")) {
190 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
191 (!rs->dev[i].rdev.recovery_offset)) {
192 rs->ti->error = "Drive designated for rebuild not specified";
193 return -EINVAL;
194 }
195
196 continue;
197 }
198
199 ret = dm_get_device(rs->ti, argv[1],
200 dm_table_get_mode(rs->ti->table),
201 &rs->dev[i].data_dev);
202 if (ret) {
203 rs->ti->error = "RAID device lookup failure";
204 return ret;
205 }
206
207 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
208 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
209 if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
210 rebuild++;
211 }
212
213 if (metadata_available) {
214 rs->md.external = 0;
215 rs->md.persistent = 1;
216 rs->md.major_version = 2;
217 } else if (rebuild && !rs->md.recovery_cp) {
218 /*
219 * Without metadata, we will not be able to tell if the array
220 * is in-sync or not - we must assume it is not. Therefore,
221 * it is impossible to rebuild a drive.
222 *
223 * Even if there is metadata, the on-disk information may
224 * indicate that the array is not in-sync and it will then
225 * fail at that time.
226 *
227 * User could specify 'nosync' option if desperate.
228 */
229 DMERR("Unable to rebuild drive while array is not in-sync");
230 rs->ti->error = "RAID device lookup failure";
231 return -EINVAL;
232 }
233
234 return 0;
235}
236
237/*
238 * Possible arguments are...
239 * RAID456:
240 * <chunk_size> [optional_args]
241 *
242 * Optional args:
243 * [[no]sync] Force or prevent recovery of the entire array
244 * [rebuild <idx>] Rebuild the drive indicated by the index
245 * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits
246 * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
247 * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
248 * [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
249 * [stripe_cache <sectors>] Stripe cache size for higher RAIDs
250 */
251static int parse_raid_params(struct raid_set *rs, char **argv,
252 unsigned num_raid_params)
253{
254 unsigned i, rebuild_cnt = 0;
255 unsigned long value;
256 char *key;
257
258 /*
259 * First, parse the in-order required arguments
260 */
261 if ((strict_strtoul(argv[0], 10, &value) < 0) ||
262 !is_power_of_2(value) || (value < 8)) {
263 rs->ti->error = "Bad chunk size";
264 return -EINVAL;
265 }
266
267 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
268 argv++;
269 num_raid_params--;
270
271 /*
272 * Second, parse the unordered optional arguments
273 */
274 for (i = 0; i < rs->md.raid_disks; i++)
275 set_bit(In_sync, &rs->dev[i].rdev.flags);
276
277 for (i = 0; i < num_raid_params; i++) {
278 if (!strcmp(argv[i], "nosync")) {
279 rs->md.recovery_cp = MaxSector;
280 rs->print_flags |= DMPF_NOSYNC;
281 rs->md.flags |= MD_SYNC_STATE_FORCED;
282 continue;
283 }
284 if (!strcmp(argv[i], "sync")) {
285 rs->md.recovery_cp = 0;
286 rs->print_flags |= DMPF_SYNC;
287 rs->md.flags |= MD_SYNC_STATE_FORCED;
288 continue;
289 }
290
291 /* The rest of the optional arguments come in key/value pairs */
292 if ((i + 1) >= num_raid_params) {
293 rs->ti->error = "Wrong number of raid parameters given";
294 return -EINVAL;
295 }
296
297 key = argv[i++];
298 if (strict_strtoul(argv[i], 10, &value) < 0) {
299 rs->ti->error = "Bad numerical argument given in raid params";
300 return -EINVAL;
301 }
302
303 if (!strcmp(key, "rebuild")) {
304 if (++rebuild_cnt > rs->raid_type->parity_devs) {
305 rs->ti->error = "Too many rebuild drives given";
306 return -EINVAL;
307 }
308 if (value > rs->md.raid_disks) {
309 rs->ti->error = "Invalid rebuild index given";
310 return -EINVAL;
311 }
312 clear_bit(In_sync, &rs->dev[value].rdev.flags);
313 rs->dev[value].rdev.recovery_offset = 0;
314 } else if (!strcmp(key, "max_write_behind")) {
315 rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
316
317 /*
318 * In device-mapper, we specify things in sectors, but
319 * MD records this value in kB
320 */
321 value /= 2;
322 if (value > COUNTER_MAX) {
323 rs->ti->error = "Max write-behind limit out of range";
324 return -EINVAL;
325 }
326 rs->md.bitmap_info.max_write_behind = value;
327 } else if (!strcmp(key, "daemon_sleep")) {
328 rs->print_flags |= DMPF_DAEMON_SLEEP;
329 if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
330 rs->ti->error = "daemon sleep period out of range";
331 return -EINVAL;
332 }
333 rs->md.bitmap_info.daemon_sleep = value;
334 } else if (!strcmp(key, "stripe_cache")) {
335 rs->print_flags |= DMPF_STRIPE_CACHE;
336
337 /*
338 * In device-mapper, we specify things in sectors, but
339 * MD records this value in kB
340 */
341 value /= 2;
342
343 if (rs->raid_type->level < 5) {
344 rs->ti->error = "Inappropriate argument: stripe_cache";
345 return -EINVAL;
346 }
347 if (raid5_set_cache_size(&rs->md, (int)value)) {
348 rs->ti->error = "Bad stripe_cache size";
349 return -EINVAL;
350 }
351 } else if (!strcmp(key, "min_recovery_rate")) {
352 rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
353 if (value > INT_MAX) {
354 rs->ti->error = "min_recovery_rate out of range";
355 return -EINVAL;
356 }
357 rs->md.sync_speed_min = (int)value;
358 } else if (!strcmp(key, "max_recovery_rate")) {
359 rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
360 if (value > INT_MAX) {
361 rs->ti->error = "max_recovery_rate out of range";
362 return -EINVAL;
363 }
364 rs->md.sync_speed_max = (int)value;
365 } else {
366 DMERR("Unable to parse RAID parameter: %s", key);
367 rs->ti->error = "Unable to parse RAID parameters";
368 return -EINVAL;
369 }
370 }
371
372 /* Assume there are no metadata devices until the drives are parsed */
373 rs->md.persistent = 0;
374 rs->md.external = 1;
375
376 return 0;
377}
378
379static void do_table_event(struct work_struct *ws)
380{
381 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
382
383 dm_table_event(rs->ti->table);
384}
385
386static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
387{
388 struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
389
390 return md_raid5_congested(&rs->md, bits);
391}
392
393static void raid_unplug(struct dm_target_callbacks *cb)
394{
395 struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
396
397 md_raid5_unplug_device(rs->md.private);
398}
399
400/*
401 * Construct a RAID4/5/6 mapping:
402 * Args:
403 * <raid_type> <#raid_params> <raid_params> \
404 * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
405 *
406 * ** metadata devices are not supported yet, use '-' instead **
407 *
408 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
409 * details on possible <raid_params>.
410 */
411static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
412{
413 int ret;
414 struct raid_type *rt;
415 unsigned long num_raid_params, num_raid_devs;
416 struct raid_set *rs = NULL;
417
418 /* Must have at least <raid_type> <#raid_params> */
419 if (argc < 2) {
420 ti->error = "Too few arguments";
421 return -EINVAL;
422 }
423
424 /* raid type */
425 rt = get_raid_type(argv[0]);
426 if (!rt) {
427 ti->error = "Unrecognised raid_type";
428 return -EINVAL;
429 }
430 argc--;
431 argv++;
432
433 /* number of RAID parameters */
434 if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
435 ti->error = "Cannot understand number of RAID parameters";
436 return -EINVAL;
437 }
438 argc--;
439 argv++;
440
441 /* Skip over RAID params for now and find out # of devices */
442 if (num_raid_params + 1 > argc) {
443 ti->error = "Arguments do not agree with counts given";
444 return -EINVAL;
445 }
446
447 if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
448 (num_raid_devs >= INT_MAX)) {
449 ti->error = "Cannot understand number of raid devices";
450 return -EINVAL;
451 }
452
453 rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
454 if (IS_ERR(rs))
455 return PTR_ERR(rs);
456
457 ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
458 if (ret)
459 goto bad;
460
461 ret = -EINVAL;
462
463 argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
464 argv += num_raid_params + 1;
465
466 if (argc != (num_raid_devs * 2)) {
467 ti->error = "Supplied RAID devices does not match the count given";
468 goto bad;
469 }
470
471 ret = dev_parms(rs, argv);
472 if (ret)
473 goto bad;
474
475 INIT_WORK(&rs->md.event_work, do_table_event);
476 ti->split_io = rs->md.chunk_sectors;
477 ti->private = rs;
478
479 mutex_lock(&rs->md.reconfig_mutex);
480 ret = md_run(&rs->md);
481 rs->md.in_sync = 0; /* Assume already marked dirty */
482 mutex_unlock(&rs->md.reconfig_mutex);
483
484 if (ret) {
485 ti->error = "Fail to run raid array";
486 goto bad;
487 }
488
489 rs->callbacks.congested_fn = raid_is_congested;
490 rs->callbacks.unplug_fn = raid_unplug;
491 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
492
493 return 0;
494
495bad:
496 context_free(rs);
497
498 return ret;
499}
500
501static void raid_dtr(struct dm_target *ti)
502{
503 struct raid_set *rs = ti->private;
504
505 list_del_init(&rs->callbacks.list);
506 md_stop(&rs->md);
507 context_free(rs);
508}
509
510static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
511{
512 struct raid_set *rs = ti->private;
513 mddev_t *mddev = &rs->md;
514
515 mddev->pers->make_request(mddev, bio);
516
517 return DM_MAPIO_SUBMITTED;
518}
519
520static int raid_status(struct dm_target *ti, status_type_t type,
521 char *result, unsigned maxlen)
522{
523 struct raid_set *rs = ti->private;
524 unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
525 unsigned sz = 0;
526 int i;
527 sector_t sync;
528
529 switch (type) {
530 case STATUSTYPE_INFO:
531 DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
532
533 for (i = 0; i < rs->md.raid_disks; i++) {
534 if (test_bit(Faulty, &rs->dev[i].rdev.flags))
535 DMEMIT("D");
536 else if (test_bit(In_sync, &rs->dev[i].rdev.flags))
537 DMEMIT("A");
538 else
539 DMEMIT("a");
540 }
541
542 if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
543 sync = rs->md.curr_resync_completed;
544 else
545 sync = rs->md.recovery_cp;
546
547 if (sync > rs->md.resync_max_sectors)
548 sync = rs->md.resync_max_sectors;
549
550 DMEMIT(" %llu/%llu",
551 (unsigned long long) sync,
552 (unsigned long long) rs->md.resync_max_sectors);
553
554 break;
555 case STATUSTYPE_TABLE:
556 /* The string you would use to construct this array */
557 for (i = 0; i < rs->md.raid_disks; i++)
558 if (rs->dev[i].data_dev &&
559 !test_bit(In_sync, &rs->dev[i].rdev.flags))
560 raid_param_cnt++; /* for rebuilds */
561
562 raid_param_cnt += (hweight64(rs->print_flags) * 2);
563 if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
564 raid_param_cnt--;
565
566 DMEMIT("%s %u %u", rs->raid_type->name,
567 raid_param_cnt, rs->md.chunk_sectors);
568
569 if ((rs->print_flags & DMPF_SYNC) &&
570 (rs->md.recovery_cp == MaxSector))
571 DMEMIT(" sync");
572 if (rs->print_flags & DMPF_NOSYNC)
573 DMEMIT(" nosync");
574
575 for (i = 0; i < rs->md.raid_disks; i++)
576 if (rs->dev[i].data_dev &&
577 !test_bit(In_sync, &rs->dev[i].rdev.flags))
578 DMEMIT(" rebuild %u", i);
579
580 if (rs->print_flags & DMPF_DAEMON_SLEEP)
581 DMEMIT(" daemon_sleep %lu",
582 rs->md.bitmap_info.daemon_sleep);
583
584 if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
585 DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
586
587 if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
588 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
589
590 if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
591 DMEMIT(" max_write_behind %lu",
592 rs->md.bitmap_info.max_write_behind);
593
594 if (rs->print_flags & DMPF_STRIPE_CACHE) {
595 raid5_conf_t *conf = rs->md.private;
596
597 /* convert from kiB to sectors */
598 DMEMIT(" stripe_cache %d",
599 conf ? conf->max_nr_stripes * 2 : 0);
600 }
601
602 DMEMIT(" %d", rs->md.raid_disks);
603 for (i = 0; i < rs->md.raid_disks; i++) {
604 DMEMIT(" -"); /* metadata device */
605
606 if (rs->dev[i].data_dev)
607 DMEMIT(" %s", rs->dev[i].data_dev->name);
608 else
609 DMEMIT(" -");
610 }
611 }
612
613 return 0;
614}
615
616static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
617{
618 struct raid_set *rs = ti->private;
619 unsigned i;
620 int ret = 0;
621
622 for (i = 0; !ret && i < rs->md.raid_disks; i++)
623 if (rs->dev[i].data_dev)
624 ret = fn(ti,
625 rs->dev[i].data_dev,
626 0, /* No offset on data devs */
627 rs->md.dev_sectors,
628 data);
629
630 return ret;
631}
632
633static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
634{
635 struct raid_set *rs = ti->private;
636 unsigned chunk_size = rs->md.chunk_sectors << 9;
637 raid5_conf_t *conf = rs->md.private;
638
639 blk_limits_io_min(limits, chunk_size);
640 blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
641}
642
643static void raid_presuspend(struct dm_target *ti)
644{
645 struct raid_set *rs = ti->private;
646
647 md_stop_writes(&rs->md);
648}
649
650static void raid_postsuspend(struct dm_target *ti)
651{
652 struct raid_set *rs = ti->private;
653
654 mddev_suspend(&rs->md);
655}
656
657static void raid_resume(struct dm_target *ti)
658{
659 struct raid_set *rs = ti->private;
660
661 mddev_resume(&rs->md);
662}
663
664static struct target_type raid_target = {
665 .name = "raid",
666 .version = {1, 0, 0},
667 .module = THIS_MODULE,
668 .ctr = raid_ctr,
669 .dtr = raid_dtr,
670 .map = raid_map,
671 .status = raid_status,
672 .iterate_devices = raid_iterate_devices,
673 .io_hints = raid_io_hints,
674 .presuspend = raid_presuspend,
675 .postsuspend = raid_postsuspend,
676 .resume = raid_resume,
677};
678
679static int __init dm_raid_init(void)
680{
681 return dm_register_target(&raid_target);
682}
683
684static void __exit dm_raid_exit(void)
685{
686 dm_unregister_target(&raid_target);
687}
688
689module_init(dm_raid_init);
690module_exit(dm_raid_exit);
691
692MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
693MODULE_ALIAS("dm-raid4");
694MODULE_ALIAS("dm-raid5");
695MODULE_ALIAS("dm-raid6");
696MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
697MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 19a59b041c27..dee326775c60 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -261,7 +261,7 @@ static int mirror_flush(struct dm_target *ti)
261 struct dm_io_request io_req = { 261 struct dm_io_request io_req = {
262 .bi_rw = WRITE_FLUSH, 262 .bi_rw = WRITE_FLUSH,
263 .mem.type = DM_IO_KMEM, 263 .mem.type = DM_IO_KMEM,
264 .mem.ptr.bvec = NULL, 264 .mem.ptr.addr = NULL,
265 .client = ms->io_client, 265 .client = ms->io_client,
266 }; 266 };
267 267
@@ -637,6 +637,12 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
637 .client = ms->io_client, 637 .client = ms->io_client,
638 }; 638 };
639 639
640 if (bio->bi_rw & REQ_DISCARD) {
641 io_req.bi_rw |= REQ_DISCARD;
642 io_req.mem.type = DM_IO_KMEM;
643 io_req.mem.ptr.addr = NULL;
644 }
645
640 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) 646 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
641 map_region(dest++, m, bio); 647 map_region(dest++, m, bio);
642 648
@@ -670,7 +676,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
670 bio_list_init(&requeue); 676 bio_list_init(&requeue);
671 677
672 while ((bio = bio_list_pop(writes))) { 678 while ((bio = bio_list_pop(writes))) {
673 if (bio->bi_rw & REQ_FLUSH) { 679 if ((bio->bi_rw & REQ_FLUSH) ||
680 (bio->bi_rw & REQ_DISCARD)) {
674 bio_list_add(&sync, bio); 681 bio_list_add(&sync, bio);
675 continue; 682 continue;
676 } 683 }
@@ -1076,8 +1083,10 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1076 ti->private = ms; 1083 ti->private = ms;
1077 ti->split_io = dm_rh_get_region_size(ms->rh); 1084 ti->split_io = dm_rh_get_region_size(ms->rh);
1078 ti->num_flush_requests = 1; 1085 ti->num_flush_requests = 1;
1086 ti->num_discard_requests = 1;
1079 1087
1080 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); 1088 ms->kmirrord_wq = alloc_workqueue("kmirrord",
1089 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
1081 if (!ms->kmirrord_wq) { 1090 if (!ms->kmirrord_wq) {
1082 DMERR("couldn't start kmirrord"); 1091 DMERR("couldn't start kmirrord");
1083 r = -ENOMEM; 1092 r = -ENOMEM;
@@ -1130,7 +1139,7 @@ static void mirror_dtr(struct dm_target *ti)
1130 1139
1131 del_timer_sync(&ms->timer); 1140 del_timer_sync(&ms->timer);
1132 flush_workqueue(ms->kmirrord_wq); 1141 flush_workqueue(ms->kmirrord_wq);
1133 flush_scheduled_work(); 1142 flush_work_sync(&ms->trigger_event);
1134 dm_kcopyd_client_destroy(ms->kcopyd_client); 1143 dm_kcopyd_client_destroy(ms->kcopyd_client);
1135 destroy_workqueue(ms->kmirrord_wq); 1144 destroy_workqueue(ms->kmirrord_wq);
1136 free_context(ms, ti, ms->nr_mirrors); 1145 free_context(ms, ti, ms->nr_mirrors);
@@ -1406,7 +1415,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
1406 1415
1407static struct target_type mirror_target = { 1416static struct target_type mirror_target = {
1408 .name = "mirror", 1417 .name = "mirror",
1409 .version = {1, 12, 0}, 1418 .version = {1, 12, 1},
1410 .module = THIS_MODULE, 1419 .module = THIS_MODULE,
1411 .ctr = mirror_ctr, 1420 .ctr = mirror_ctr,
1412 .dtr = mirror_dtr, 1421 .dtr = mirror_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2129cdb115dc..95891dfcbca0 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
256 */ 256 */
257 INIT_WORK_ONSTACK(&req.work, do_metadata); 257 INIT_WORK_ONSTACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_workqueue(ps->metadata_wq); 259 flush_work(&req.work);
260 260
261 return req.result; 261 return req.result;
262} 262}
@@ -818,7 +818,7 @@ static int persistent_ctr(struct dm_exception_store *store,
818 atomic_set(&ps->pending_count, 0); 818 atomic_set(&ps->pending_count, 0);
819 ps->callbacks = NULL; 819 ps->callbacks = NULL;
820 820
821 ps->metadata_wq = create_singlethread_workqueue("ksnaphd"); 821 ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
822 if (!ps->metadata_wq) { 822 if (!ps->metadata_wq) {
823 kfree(ps); 823 kfree(ps);
824 DMERR("couldn't start header metadata update thread"); 824 DMERR("couldn't start header metadata update thread");
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 53cf79d8bcbc..fdde53cd12b7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,7 +19,6 @@
19#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/dm-kcopyd.h> 21#include <linux/dm-kcopyd.h>
22#include <linux/workqueue.h>
23 22
24#include "dm-exception-store.h" 23#include "dm-exception-store.h"
25 24
@@ -80,9 +79,6 @@ struct dm_snapshot {
80 /* Origin writes don't trigger exceptions until this is set */ 79 /* Origin writes don't trigger exceptions until this is set */
81 int active; 80 int active;
82 81
83 /* Whether or not owning mapped_device is suspended */
84 int suspended;
85
86 atomic_t pending_exceptions_count; 82 atomic_t pending_exceptions_count;
87 83
88 mempool_t *pending_pool; 84 mempool_t *pending_pool;
@@ -106,10 +102,6 @@ struct dm_snapshot {
106 102
107 struct dm_kcopyd_client *kcopyd_client; 103 struct dm_kcopyd_client *kcopyd_client;
108 104
109 /* Queue of snapshot writes for ksnapd to flush */
110 struct bio_list queued_bios;
111 struct work_struct queued_bios_work;
112
113 /* Wait for events based on state_bits */ 105 /* Wait for events based on state_bits */
114 unsigned long state_bits; 106 unsigned long state_bits;
115 107
@@ -160,9 +152,6 @@ struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
160} 152}
161EXPORT_SYMBOL(dm_snap_cow); 153EXPORT_SYMBOL(dm_snap_cow);
162 154
163static struct workqueue_struct *ksnapd;
164static void flush_queued_bios(struct work_struct *work);
165
166static sector_t chunk_to_sector(struct dm_exception_store *store, 155static sector_t chunk_to_sector(struct dm_exception_store *store,
167 chunk_t chunk) 156 chunk_t chunk)
168{ 157{
@@ -1110,7 +1099,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1110 s->ti = ti; 1099 s->ti = ti;
1111 s->valid = 1; 1100 s->valid = 1;
1112 s->active = 0; 1101 s->active = 0;
1113 s->suspended = 0;
1114 atomic_set(&s->pending_exceptions_count, 0); 1102 atomic_set(&s->pending_exceptions_count, 0);
1115 init_rwsem(&s->lock); 1103 init_rwsem(&s->lock);
1116 INIT_LIST_HEAD(&s->list); 1104 INIT_LIST_HEAD(&s->list);
@@ -1153,9 +1141,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1153 1141
1154 spin_lock_init(&s->tracked_chunk_lock); 1142 spin_lock_init(&s->tracked_chunk_lock);
1155 1143
1156 bio_list_init(&s->queued_bios);
1157 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
1158
1159 ti->private = s; 1144 ti->private = s;
1160 ti->num_flush_requests = num_flush_requests; 1145 ti->num_flush_requests = num_flush_requests;
1161 1146
@@ -1279,8 +1264,6 @@ static void snapshot_dtr(struct dm_target *ti)
1279 struct dm_snapshot *s = ti->private; 1264 struct dm_snapshot *s = ti->private;
1280 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; 1265 struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1281 1266
1282 flush_workqueue(ksnapd);
1283
1284 down_read(&_origins_lock); 1267 down_read(&_origins_lock);
1285 /* Check whether exception handover must be cancelled */ 1268 /* Check whether exception handover must be cancelled */
1286 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); 1269 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
@@ -1342,20 +1325,6 @@ static void flush_bios(struct bio *bio)
1342 } 1325 }
1343} 1326}
1344 1327
1345static void flush_queued_bios(struct work_struct *work)
1346{
1347 struct dm_snapshot *s =
1348 container_of(work, struct dm_snapshot, queued_bios_work);
1349 struct bio *queued_bios;
1350 unsigned long flags;
1351
1352 spin_lock_irqsave(&s->pe_lock, flags);
1353 queued_bios = bio_list_get(&s->queued_bios);
1354 spin_unlock_irqrestore(&s->pe_lock, flags);
1355
1356 flush_bios(queued_bios);
1357}
1358
1359static int do_origin(struct dm_dev *origin, struct bio *bio); 1328static int do_origin(struct dm_dev *origin, struct bio *bio);
1360 1329
1361/* 1330/*
@@ -1760,15 +1729,6 @@ static void snapshot_merge_presuspend(struct dm_target *ti)
1760 stop_merge(s); 1729 stop_merge(s);
1761} 1730}
1762 1731
1763static void snapshot_postsuspend(struct dm_target *ti)
1764{
1765 struct dm_snapshot *s = ti->private;
1766
1767 down_write(&s->lock);
1768 s->suspended = 1;
1769 up_write(&s->lock);
1770}
1771
1772static int snapshot_preresume(struct dm_target *ti) 1732static int snapshot_preresume(struct dm_target *ti)
1773{ 1733{
1774 int r = 0; 1734 int r = 0;
@@ -1783,7 +1743,7 @@ static int snapshot_preresume(struct dm_target *ti)
1783 DMERR("Unable to resume snapshot source until " 1743 DMERR("Unable to resume snapshot source until "
1784 "handover completes."); 1744 "handover completes.");
1785 r = -EINVAL; 1745 r = -EINVAL;
1786 } else if (!snap_src->suspended) { 1746 } else if (!dm_suspended(snap_src->ti)) {
1787 DMERR("Unable to perform snapshot handover until " 1747 DMERR("Unable to perform snapshot handover until "
1788 "source is suspended."); 1748 "source is suspended.");
1789 r = -EINVAL; 1749 r = -EINVAL;
@@ -1816,7 +1776,6 @@ static void snapshot_resume(struct dm_target *ti)
1816 1776
1817 down_write(&s->lock); 1777 down_write(&s->lock);
1818 s->active = 1; 1778 s->active = 1;
1819 s->suspended = 0;
1820 up_write(&s->lock); 1779 up_write(&s->lock);
1821} 1780}
1822 1781
@@ -2194,7 +2153,7 @@ static int origin_iterate_devices(struct dm_target *ti,
2194 2153
2195static struct target_type origin_target = { 2154static struct target_type origin_target = {
2196 .name = "snapshot-origin", 2155 .name = "snapshot-origin",
2197 .version = {1, 7, 0}, 2156 .version = {1, 7, 1},
2198 .module = THIS_MODULE, 2157 .module = THIS_MODULE,
2199 .ctr = origin_ctr, 2158 .ctr = origin_ctr,
2200 .dtr = origin_dtr, 2159 .dtr = origin_dtr,
@@ -2207,13 +2166,12 @@ static struct target_type origin_target = {
2207 2166
2208static struct target_type snapshot_target = { 2167static struct target_type snapshot_target = {
2209 .name = "snapshot", 2168 .name = "snapshot",
2210 .version = {1, 9, 0}, 2169 .version = {1, 10, 0},
2211 .module = THIS_MODULE, 2170 .module = THIS_MODULE,
2212 .ctr = snapshot_ctr, 2171 .ctr = snapshot_ctr,
2213 .dtr = snapshot_dtr, 2172 .dtr = snapshot_dtr,
2214 .map = snapshot_map, 2173 .map = snapshot_map,
2215 .end_io = snapshot_end_io, 2174 .end_io = snapshot_end_io,
2216 .postsuspend = snapshot_postsuspend,
2217 .preresume = snapshot_preresume, 2175 .preresume = snapshot_preresume,
2218 .resume = snapshot_resume, 2176 .resume = snapshot_resume,
2219 .status = snapshot_status, 2177 .status = snapshot_status,
@@ -2222,14 +2180,13 @@ static struct target_type snapshot_target = {
2222 2180
2223static struct target_type merge_target = { 2181static struct target_type merge_target = {
2224 .name = dm_snapshot_merge_target_name, 2182 .name = dm_snapshot_merge_target_name,
2225 .version = {1, 0, 0}, 2183 .version = {1, 1, 0},
2226 .module = THIS_MODULE, 2184 .module = THIS_MODULE,
2227 .ctr = snapshot_ctr, 2185 .ctr = snapshot_ctr,
2228 .dtr = snapshot_dtr, 2186 .dtr = snapshot_dtr,
2229 .map = snapshot_merge_map, 2187 .map = snapshot_merge_map,
2230 .end_io = snapshot_end_io, 2188 .end_io = snapshot_end_io,
2231 .presuspend = snapshot_merge_presuspend, 2189 .presuspend = snapshot_merge_presuspend,
2232 .postsuspend = snapshot_postsuspend,
2233 .preresume = snapshot_preresume, 2190 .preresume = snapshot_preresume,
2234 .resume = snapshot_merge_resume, 2191 .resume = snapshot_merge_resume,
2235 .status = snapshot_status, 2192 .status = snapshot_status,
@@ -2291,17 +2248,8 @@ static int __init dm_snapshot_init(void)
2291 goto bad_tracked_chunk_cache; 2248 goto bad_tracked_chunk_cache;
2292 } 2249 }
2293 2250
2294 ksnapd = create_singlethread_workqueue("ksnapd");
2295 if (!ksnapd) {
2296 DMERR("Failed to create ksnapd workqueue.");
2297 r = -ENOMEM;
2298 goto bad_pending_pool;
2299 }
2300
2301 return 0; 2251 return 0;
2302 2252
2303bad_pending_pool:
2304 kmem_cache_destroy(tracked_chunk_cache);
2305bad_tracked_chunk_cache: 2253bad_tracked_chunk_cache:
2306 kmem_cache_destroy(pending_cache); 2254 kmem_cache_destroy(pending_cache);
2307bad_pending_cache: 2255bad_pending_cache:
@@ -2322,8 +2270,6 @@ bad_register_snapshot_target:
2322 2270
2323static void __exit dm_snapshot_exit(void) 2271static void __exit dm_snapshot_exit(void)
2324{ 2272{
2325 destroy_workqueue(ksnapd);
2326
2327 dm_unregister_target(&snapshot_target); 2273 dm_unregister_target(&snapshot_target);
2328 dm_unregister_target(&origin_target); 2274 dm_unregister_target(&origin_target);
2329 dm_unregister_target(&merge_target); 2275 dm_unregister_target(&merge_target);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index f0371b4c4fbf..dddfa14f2982 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -39,23 +39,20 @@ struct stripe_c {
39 struct dm_target *ti; 39 struct dm_target *ti;
40 40
41 /* Work struct used for triggering events*/ 41 /* Work struct used for triggering events*/
42 struct work_struct kstriped_ws; 42 struct work_struct trigger_event;
43 43
44 struct stripe stripe[0]; 44 struct stripe stripe[0];
45}; 45};
46 46
47static struct workqueue_struct *kstriped;
48
49/* 47/*
50 * An event is triggered whenever a drive 48 * An event is triggered whenever a drive
51 * drops out of a stripe volume. 49 * drops out of a stripe volume.
52 */ 50 */
53static void trigger_event(struct work_struct *work) 51static void trigger_event(struct work_struct *work)
54{ 52{
55 struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws); 53 struct stripe_c *sc = container_of(work, struct stripe_c,
56 54 trigger_event);
57 dm_table_event(sc->ti->table); 55 dm_table_event(sc->ti->table);
58
59} 56}
60 57
61static inline struct stripe_c *alloc_context(unsigned int stripes) 58static inline struct stripe_c *alloc_context(unsigned int stripes)
@@ -160,7 +157,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
160 return -ENOMEM; 157 return -ENOMEM;
161 } 158 }
162 159
163 INIT_WORK(&sc->kstriped_ws, trigger_event); 160 INIT_WORK(&sc->trigger_event, trigger_event);
164 161
165 /* Set pointer to dm target; used in trigger_event */ 162 /* Set pointer to dm target; used in trigger_event */
166 sc->ti = ti; 163 sc->ti = ti;
@@ -211,7 +208,7 @@ static void stripe_dtr(struct dm_target *ti)
211 for (i = 0; i < sc->stripes; i++) 208 for (i = 0; i < sc->stripes; i++)
212 dm_put_device(ti, sc->stripe[i].dev); 209 dm_put_device(ti, sc->stripe[i].dev);
213 210
214 flush_workqueue(kstriped); 211 flush_work_sync(&sc->trigger_event);
215 kfree(sc); 212 kfree(sc);
216} 213}
217 214
@@ -367,7 +364,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
367 atomic_inc(&(sc->stripe[i].error_count)); 364 atomic_inc(&(sc->stripe[i].error_count));
368 if (atomic_read(&(sc->stripe[i].error_count)) < 365 if (atomic_read(&(sc->stripe[i].error_count)) <
369 DM_IO_ERROR_THRESHOLD) 366 DM_IO_ERROR_THRESHOLD)
370 queue_work(kstriped, &sc->kstriped_ws); 367 schedule_work(&sc->trigger_event);
371 } 368 }
372 369
373 return error; 370 return error;
@@ -401,7 +398,7 @@ static void stripe_io_hints(struct dm_target *ti,
401 398
402static struct target_type stripe_target = { 399static struct target_type stripe_target = {
403 .name = "striped", 400 .name = "striped",
404 .version = {1, 3, 0}, 401 .version = {1, 3, 1},
405 .module = THIS_MODULE, 402 .module = THIS_MODULE,
406 .ctr = stripe_ctr, 403 .ctr = stripe_ctr,
407 .dtr = stripe_dtr, 404 .dtr = stripe_dtr,
@@ -422,20 +419,10 @@ int __init dm_stripe_init(void)
422 return r; 419 return r;
423 } 420 }
424 421
425 kstriped = create_singlethread_workqueue("kstriped");
426 if (!kstriped) {
427 DMERR("failed to create workqueue kstriped");
428 dm_unregister_target(&stripe_target);
429 return -ENOMEM;
430 }
431
432 return r; 422 return r;
433} 423}
434 424
435void dm_stripe_exit(void) 425void dm_stripe_exit(void)
436{ 426{
437 dm_unregister_target(&stripe_target); 427 dm_unregister_target(&stripe_target);
438 destroy_workqueue(kstriped);
439
440 return;
441} 428}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 985c20a4f30e..38e4eb1bb965 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -71,6 +71,8 @@ struct dm_table {
71 void *event_context; 71 void *event_context;
72 72
73 struct dm_md_mempools *mempools; 73 struct dm_md_mempools *mempools;
74
75 struct list_head target_callbacks;
74}; 76};
75 77
76/* 78/*
@@ -204,6 +206,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
204 return -ENOMEM; 206 return -ENOMEM;
205 207
206 INIT_LIST_HEAD(&t->devices); 208 INIT_LIST_HEAD(&t->devices);
209 INIT_LIST_HEAD(&t->target_callbacks);
207 atomic_set(&t->holders, 0); 210 atomic_set(&t->holders, 0);
208 t->discards_supported = 1; 211 t->discards_supported = 1;
209 212
@@ -347,6 +350,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
347 if (!d->dm_dev.bdev) 350 if (!d->dm_dev.bdev)
348 return; 351 return;
349 352
353 bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
350 blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL); 354 blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
351 d->dm_dev.bdev = NULL; 355 d->dm_dev.bdev = NULL;
352} 356}
@@ -1225,10 +1229,17 @@ int dm_table_resume_targets(struct dm_table *t)
1225 return 0; 1229 return 0;
1226} 1230}
1227 1231
1232void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
1233{
1234 list_add(&cb->list, &t->target_callbacks);
1235}
1236EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
1237
1228int dm_table_any_congested(struct dm_table *t, int bdi_bits) 1238int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1229{ 1239{
1230 struct dm_dev_internal *dd; 1240 struct dm_dev_internal *dd;
1231 struct list_head *devices = dm_table_get_devices(t); 1241 struct list_head *devices = dm_table_get_devices(t);
1242 struct dm_target_callbacks *cb;
1232 int r = 0; 1243 int r = 0;
1233 1244
1234 list_for_each_entry(dd, devices, list) { 1245 list_for_each_entry(dd, devices, list) {
@@ -1243,6 +1254,10 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1243 bdevname(dd->dm_dev.bdev, b)); 1254 bdevname(dd->dm_dev.bdev, b));
1244 } 1255 }
1245 1256
1257 list_for_each_entry(cb, &t->target_callbacks, list)
1258 if (cb->congested_fn)
1259 r |= cb->congested_fn(cb, bdi_bits);
1260
1246 return r; 1261 return r;
1247} 1262}
1248 1263
@@ -1264,6 +1279,7 @@ void dm_table_unplug_all(struct dm_table *t)
1264{ 1279{
1265 struct dm_dev_internal *dd; 1280 struct dm_dev_internal *dd;
1266 struct list_head *devices = dm_table_get_devices(t); 1281 struct list_head *devices = dm_table_get_devices(t);
1282 struct dm_target_callbacks *cb;
1267 1283
1268 list_for_each_entry(dd, devices, list) { 1284 list_for_each_entry(dd, devices, list) {
1269 struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); 1285 struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
@@ -1276,6 +1292,10 @@ void dm_table_unplug_all(struct dm_table *t)
1276 dm_device_name(t->md), 1292 dm_device_name(t->md),
1277 bdevname(dd->dm_dev.bdev, b)); 1293 bdevname(dd->dm_dev.bdev, b));
1278 } 1294 }
1295
1296 list_for_each_entry(cb, &t->target_callbacks, list)
1297 if (cb->unplug_fn)
1298 cb->unplug_fn(cb);
1279} 1299}
1280 1300
1281struct mapped_device *dm_table_get_md(struct dm_table *t) 1301struct mapped_device *dm_table_get_md(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f48a2f359ac4..eaa3af0e0632 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -32,7 +32,6 @@
32#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" 32#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
33#define DM_COOKIE_LENGTH 24 33#define DM_COOKIE_LENGTH 24
34 34
35static DEFINE_MUTEX(dm_mutex);
36static const char *_name = DM_NAME; 35static const char *_name = DM_NAME;
37 36
38static unsigned int major = 0; 37static unsigned int major = 0;
@@ -328,7 +327,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
328{ 327{
329 struct mapped_device *md; 328 struct mapped_device *md;
330 329
331 mutex_lock(&dm_mutex);
332 spin_lock(&_minor_lock); 330 spin_lock(&_minor_lock);
333 331
334 md = bdev->bd_disk->private_data; 332 md = bdev->bd_disk->private_data;
@@ -346,7 +344,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
346 344
347out: 345out:
348 spin_unlock(&_minor_lock); 346 spin_unlock(&_minor_lock);
349 mutex_unlock(&dm_mutex);
350 347
351 return md ? 0 : -ENXIO; 348 return md ? 0 : -ENXIO;
352} 349}
@@ -355,10 +352,12 @@ static int dm_blk_close(struct gendisk *disk, fmode_t mode)
355{ 352{
356 struct mapped_device *md = disk->private_data; 353 struct mapped_device *md = disk->private_data;
357 354
358 mutex_lock(&dm_mutex); 355 spin_lock(&_minor_lock);
356
359 atomic_dec(&md->open_count); 357 atomic_dec(&md->open_count);
360 dm_put(md); 358 dm_put(md);
361 mutex_unlock(&dm_mutex); 359
360 spin_unlock(&_minor_lock);
362 361
363 return 0; 362 return 0;
364} 363}
@@ -1638,13 +1637,15 @@ static void dm_request_fn(struct request_queue *q)
1638 if (map_request(ti, clone, md)) 1637 if (map_request(ti, clone, md))
1639 goto requeued; 1638 goto requeued;
1640 1639
1641 spin_lock_irq(q->queue_lock); 1640 BUG_ON(!irqs_disabled());
1641 spin_lock(q->queue_lock);
1642 } 1642 }
1643 1643
1644 goto out; 1644 goto out;
1645 1645
1646requeued: 1646requeued:
1647 spin_lock_irq(q->queue_lock); 1647 BUG_ON(!irqs_disabled());
1648 spin_lock(q->queue_lock);
1648 1649
1649plug_and_out: 1650plug_and_out:
1650 if (!elv_queue_empty(q)) 1651 if (!elv_queue_empty(q))
@@ -1884,7 +1885,8 @@ static struct mapped_device *alloc_dev(int minor)
1884 add_disk(md->disk); 1885 add_disk(md->disk);
1885 format_dev_t(md->name, MKDEV(_major, minor)); 1886 format_dev_t(md->name, MKDEV(_major, minor));
1886 1887
1887 md->wq = create_singlethread_workqueue("kdmflush"); 1888 md->wq = alloc_workqueue("kdmflush",
1889 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
1888 if (!md->wq) 1890 if (!md->wq)
1889 goto bad_thread; 1891 goto bad_thread;
1890 1892
@@ -1992,13 +1994,14 @@ static void event_callback(void *context)
1992 wake_up(&md->eventq); 1994 wake_up(&md->eventq);
1993} 1995}
1994 1996
1997/*
1998 * Protected by md->suspend_lock obtained by dm_swap_table().
1999 */
1995static void __set_size(struct mapped_device *md, sector_t size) 2000static void __set_size(struct mapped_device *md, sector_t size)
1996{ 2001{
1997 set_capacity(md->disk, size); 2002 set_capacity(md->disk, size);
1998 2003
1999 mutex_lock(&md->bdev->bd_inode->i_mutex);
2000 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 2004 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2001 mutex_unlock(&md->bdev->bd_inode->i_mutex);
2002} 2005}
2003 2006
2004/* 2007/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7fc090ac9e28..0cc30ecda4c1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -287,11 +287,14 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
287 mddev_t *mddev = q->queuedata; 287 mddev_t *mddev = q->queuedata;
288 int rv; 288 int rv;
289 int cpu; 289 int cpu;
290 unsigned int sectors;
290 291
291 if (mddev == NULL || mddev->pers == NULL) { 292 if (mddev == NULL || mddev->pers == NULL
293 || !mddev->ready) {
292 bio_io_error(bio); 294 bio_io_error(bio);
293 return 0; 295 return 0;
294 } 296 }
297 smp_rmb(); /* Ensure implications of 'active' are visible */
295 rcu_read_lock(); 298 rcu_read_lock();
296 if (mddev->suspended) { 299 if (mddev->suspended) {
297 DEFINE_WAIT(__wait); 300 DEFINE_WAIT(__wait);
@@ -309,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
309 atomic_inc(&mddev->active_io); 312 atomic_inc(&mddev->active_io);
310 rcu_read_unlock(); 313 rcu_read_unlock();
311 314
315 /*
316 * save the sectors now since our bio can
317 * go away inside make_request
318 */
319 sectors = bio_sectors(bio);
312 rv = mddev->pers->make_request(mddev, bio); 320 rv = mddev->pers->make_request(mddev, bio);
313 321
314 cpu = part_stat_lock(); 322 cpu = part_stat_lock();
315 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 323 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
316 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], 324 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
317 bio_sectors(bio));
318 part_stat_unlock(); 325 part_stat_unlock();
319 326
320 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 327 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -703,9 +710,9 @@ static struct mdk_personality *find_pers(int level, char *clevel)
703} 710}
704 711
705/* return the offset of the super block in 512byte sectors */ 712/* return the offset of the super block in 512byte sectors */
706static inline sector_t calc_dev_sboffset(struct block_device *bdev) 713static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev)
707{ 714{
708 sector_t num_sectors = i_size_read(bdev->bd_inode) / 512; 715 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
709 return MD_NEW_SIZE_SECTORS(num_sectors); 716 return MD_NEW_SIZE_SECTORS(num_sectors);
710} 717}
711 718
@@ -763,7 +770,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
763 */ 770 */
764 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 771 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
765 772
766 bio->bi_bdev = rdev->bdev; 773 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
767 bio->bi_sector = sector; 774 bio->bi_sector = sector;
768 bio_add_page(bio, page, size, 0); 775 bio_add_page(bio, page, size, 0);
769 bio->bi_private = rdev; 776 bio->bi_private = rdev;
@@ -793,7 +800,7 @@ static void bi_complete(struct bio *bio, int error)
793} 800}
794 801
795int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, 802int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
796 struct page *page, int rw) 803 struct page *page, int rw, bool metadata_op)
797{ 804{
798 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); 805 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
799 struct completion event; 806 struct completion event;
@@ -801,8 +808,12 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
801 808
802 rw |= REQ_SYNC | REQ_UNPLUG; 809 rw |= REQ_SYNC | REQ_UNPLUG;
803 810
804 bio->bi_bdev = rdev->bdev; 811 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
805 bio->bi_sector = sector; 812 rdev->meta_bdev : rdev->bdev;
813 if (metadata_op)
814 bio->bi_sector = sector + rdev->sb_start;
815 else
816 bio->bi_sector = sector + rdev->data_offset;
806 bio_add_page(bio, page, size, 0); 817 bio_add_page(bio, page, size, 0);
807 init_completion(&event); 818 init_completion(&event);
808 bio->bi_private = &event; 819 bio->bi_private = &event;
@@ -827,7 +838,7 @@ static int read_disk_sb(mdk_rdev_t * rdev, int size)
827 return 0; 838 return 0;
828 839
829 840
830 if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ)) 841 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
831 goto fail; 842 goto fail;
832 rdev->sb_loaded = 1; 843 rdev->sb_loaded = 1;
833 return 0; 844 return 0;
@@ -989,7 +1000,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
989 * 1000 *
990 * It also happens to be a multiple of 4Kb. 1001 * It also happens to be a multiple of 4Kb.
991 */ 1002 */
992 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 1003 rdev->sb_start = calc_dev_sboffset(rdev);
993 1004
994 ret = read_disk_sb(rdev, MD_SB_BYTES); 1005 ret = read_disk_sb(rdev, MD_SB_BYTES);
995 if (ret) return ret; 1006 if (ret) return ret;
@@ -1330,7 +1341,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1330 return 0; /* component must fit device */ 1341 return 0; /* component must fit device */
1331 if (rdev->mddev->bitmap_info.offset) 1342 if (rdev->mddev->bitmap_info.offset)
1332 return 0; /* can't move bitmap */ 1343 return 0; /* can't move bitmap */
1333 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 1344 rdev->sb_start = calc_dev_sboffset(rdev);
1334 if (!num_sectors || num_sectors > rdev->sb_start) 1345 if (!num_sectors || num_sectors > rdev->sb_start)
1335 num_sectors = rdev->sb_start; 1346 num_sectors = rdev->sb_start;
1336 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1347 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -1906,6 +1917,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1906 MD_BUG(); 1917 MD_BUG();
1907 return; 1918 return;
1908 } 1919 }
1920 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
1909 list_del_rcu(&rdev->same_set); 1921 list_del_rcu(&rdev->same_set);
1910 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 1922 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1911 rdev->mddev = NULL; 1923 rdev->mddev = NULL;
@@ -1940,8 +1952,6 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1940 __bdevname(dev, b)); 1952 __bdevname(dev, b));
1941 return PTR_ERR(bdev); 1953 return PTR_ERR(bdev);
1942 } 1954 }
1943 if (!shared)
1944 set_bit(AllReserved, &rdev->flags);
1945 rdev->bdev = bdev; 1955 rdev->bdev = bdev;
1946 return err; 1956 return err;
1947} 1957}
@@ -2458,6 +2468,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2458 if (rdev->raid_disk != -1) 2468 if (rdev->raid_disk != -1)
2459 return -EBUSY; 2469 return -EBUSY;
2460 2470
2471 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2472 return -EBUSY;
2473
2461 if (rdev->mddev->pers->hot_add_disk == NULL) 2474 if (rdev->mddev->pers->hot_add_disk == NULL)
2462 return -EINVAL; 2475 return -EINVAL;
2463 2476
@@ -2465,6 +2478,10 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2465 if (rdev2->raid_disk == slot) 2478 if (rdev2->raid_disk == slot)
2466 return -EEXIST; 2479 return -EEXIST;
2467 2480
2481 if (slot >= rdev->mddev->raid_disks &&
2482 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2483 return -ENOSPC;
2484
2468 rdev->raid_disk = slot; 2485 rdev->raid_disk = slot;
2469 if (test_bit(In_sync, &rdev->flags)) 2486 if (test_bit(In_sync, &rdev->flags))
2470 rdev->saved_raid_disk = slot; 2487 rdev->saved_raid_disk = slot;
@@ -2482,7 +2499,8 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2482 /* failure here is OK */; 2499 /* failure here is OK */;
2483 /* don't wakeup anyone, leave that to userspace. */ 2500 /* don't wakeup anyone, leave that to userspace. */
2484 } else { 2501 } else {
2485 if (slot >= rdev->mddev->raid_disks) 2502 if (slot >= rdev->mddev->raid_disks &&
2503 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2486 return -ENOSPC; 2504 return -ENOSPC;
2487 rdev->raid_disk = slot; 2505 rdev->raid_disk = slot;
2488 /* assume it is working */ 2506 /* assume it is working */
@@ -2598,12 +2616,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2598 2616
2599 mddev_lock(mddev); 2617 mddev_lock(mddev);
2600 list_for_each_entry(rdev2, &mddev->disks, same_set) 2618 list_for_each_entry(rdev2, &mddev->disks, same_set)
2601 if (test_bit(AllReserved, &rdev2->flags) || 2619 if (rdev->bdev == rdev2->bdev &&
2602 (rdev->bdev == rdev2->bdev && 2620 rdev != rdev2 &&
2603 rdev != rdev2 && 2621 overlaps(rdev->data_offset, rdev->sectors,
2604 overlaps(rdev->data_offset, rdev->sectors, 2622 rdev2->data_offset,
2605 rdev2->data_offset, 2623 rdev2->sectors)) {
2606 rdev2->sectors))) {
2607 overlap = 1; 2624 overlap = 1;
2608 break; 2625 break;
2609 } 2626 }
@@ -3107,7 +3124,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
3107 char nm[20]; 3124 char nm[20];
3108 if (rdev->raid_disk < 0) 3125 if (rdev->raid_disk < 0)
3109 continue; 3126 continue;
3110 if (rdev->new_raid_disk > mddev->raid_disks) 3127 if (rdev->new_raid_disk >= mddev->raid_disks)
3111 rdev->new_raid_disk = -1; 3128 rdev->new_raid_disk = -1;
3112 if (rdev->new_raid_disk == rdev->raid_disk) 3129 if (rdev->new_raid_disk == rdev->raid_disk)
3113 continue; 3130 continue;
@@ -3736,6 +3753,8 @@ action_show(mddev_t *mddev, char *page)
3736 return sprintf(page, "%s\n", type); 3753 return sprintf(page, "%s\n", type);
3737} 3754}
3738 3755
3756static void reap_sync_thread(mddev_t *mddev);
3757
3739static ssize_t 3758static ssize_t
3740action_store(mddev_t *mddev, const char *page, size_t len) 3759action_store(mddev_t *mddev, const char *page, size_t len)
3741{ 3760{
@@ -3750,9 +3769,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
3750 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 3769 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3751 if (mddev->sync_thread) { 3770 if (mddev->sync_thread) {
3752 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3771 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3753 md_unregister_thread(mddev->sync_thread); 3772 reap_sync_thread(mddev);
3754 mddev->sync_thread = NULL;
3755 mddev->recovery = 0;
3756 } 3773 }
3757 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3774 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3758 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 3775 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -3904,7 +3921,7 @@ static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3904static ssize_t 3921static ssize_t
3905sync_completed_show(mddev_t *mddev, char *page) 3922sync_completed_show(mddev_t *mddev, char *page)
3906{ 3923{
3907 unsigned long max_sectors, resync; 3924 unsigned long long max_sectors, resync;
3908 3925
3909 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3926 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3910 return sprintf(page, "none\n"); 3927 return sprintf(page, "none\n");
@@ -3915,7 +3932,7 @@ sync_completed_show(mddev_t *mddev, char *page)
3915 max_sectors = mddev->dev_sectors; 3932 max_sectors = mddev->dev_sectors;
3916 3933
3917 resync = mddev->curr_resync_completed; 3934 resync = mddev->curr_resync_completed;
3918 return sprintf(page, "%lu / %lu\n", resync, max_sectors); 3935 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
3919} 3936}
3920 3937
3921static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 3938static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
@@ -4002,19 +4019,24 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
4002{ 4019{
4003 char *e; 4020 char *e;
4004 unsigned long long new = simple_strtoull(buf, &e, 10); 4021 unsigned long long new = simple_strtoull(buf, &e, 10);
4022 unsigned long long old = mddev->suspend_lo;
4005 4023
4006 if (mddev->pers == NULL || 4024 if (mddev->pers == NULL ||
4007 mddev->pers->quiesce == NULL) 4025 mddev->pers->quiesce == NULL)
4008 return -EINVAL; 4026 return -EINVAL;
4009 if (buf == e || (*e && *e != '\n')) 4027 if (buf == e || (*e && *e != '\n'))
4010 return -EINVAL; 4028 return -EINVAL;
4011 if (new >= mddev->suspend_hi || 4029
4012 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 4030 mddev->suspend_lo = new;
4013 mddev->suspend_lo = new; 4031 if (new >= old)
4032 /* Shrinking suspended region */
4014 mddev->pers->quiesce(mddev, 2); 4033 mddev->pers->quiesce(mddev, 2);
4015 return len; 4034 else {
4016 } else 4035 /* Expanding suspended region - need to wait */
4017 return -EINVAL; 4036 mddev->pers->quiesce(mddev, 1);
4037 mddev->pers->quiesce(mddev, 0);
4038 }
4039 return len;
4018} 4040}
4019static struct md_sysfs_entry md_suspend_lo = 4041static struct md_sysfs_entry md_suspend_lo =
4020__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4042__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
@@ -4031,20 +4053,24 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
4031{ 4053{
4032 char *e; 4054 char *e;
4033 unsigned long long new = simple_strtoull(buf, &e, 10); 4055 unsigned long long new = simple_strtoull(buf, &e, 10);
4056 unsigned long long old = mddev->suspend_hi;
4034 4057
4035 if (mddev->pers == NULL || 4058 if (mddev->pers == NULL ||
4036 mddev->pers->quiesce == NULL) 4059 mddev->pers->quiesce == NULL)
4037 return -EINVAL; 4060 return -EINVAL;
4038 if (buf == e || (*e && *e != '\n')) 4061 if (buf == e || (*e && *e != '\n'))
4039 return -EINVAL; 4062 return -EINVAL;
4040 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 4063
4041 (new > mddev->suspend_lo && new > mddev->suspend_hi)) { 4064 mddev->suspend_hi = new;
4042 mddev->suspend_hi = new; 4065 if (new <= old)
4066 /* Shrinking suspended region */
4067 mddev->pers->quiesce(mddev, 2);
4068 else {
4069 /* Expanding suspended region - need to wait */
4043 mddev->pers->quiesce(mddev, 1); 4070 mddev->pers->quiesce(mddev, 1);
4044 mddev->pers->quiesce(mddev, 0); 4071 mddev->pers->quiesce(mddev, 0);
4045 return len; 4072 }
4046 } else 4073 return len;
4047 return -EINVAL;
4048} 4074}
4049static struct md_sysfs_entry md_suspend_hi = 4075static struct md_sysfs_entry md_suspend_hi =
4050__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4076__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
@@ -4422,7 +4448,9 @@ int md_run(mddev_t *mddev)
4422 * We don't want the data to overlap the metadata, 4448 * We don't want the data to overlap the metadata,
4423 * Internal Bitmap issues have been handled elsewhere. 4449 * Internal Bitmap issues have been handled elsewhere.
4424 */ 4450 */
4425 if (rdev->data_offset < rdev->sb_start) { 4451 if (rdev->meta_bdev) {
4452 /* Nothing to check */;
4453 } else if (rdev->data_offset < rdev->sb_start) {
4426 if (mddev->dev_sectors && 4454 if (mddev->dev_sectors &&
4427 rdev->data_offset + mddev->dev_sectors 4455 rdev->data_offset + mddev->dev_sectors
4428 > rdev->sb_start) { 4456 > rdev->sb_start) {
@@ -4556,7 +4584,8 @@ int md_run(mddev_t *mddev)
4556 mddev->safemode_timer.data = (unsigned long) mddev; 4584 mddev->safemode_timer.data = (unsigned long) mddev;
4557 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4585 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4558 mddev->in_sync = 1; 4586 mddev->in_sync = 1;
4559 4587 smp_wmb();
4588 mddev->ready = 1;
4560 list_for_each_entry(rdev, &mddev->disks, same_set) 4589 list_for_each_entry(rdev, &mddev->disks, same_set)
4561 if (rdev->raid_disk >= 0) { 4590 if (rdev->raid_disk >= 0) {
4562 char nm[20]; 4591 char nm[20];
@@ -4693,13 +4722,12 @@ static void md_clean(mddev_t *mddev)
4693 mddev->plug = NULL; 4722 mddev->plug = NULL;
4694} 4723}
4695 4724
4696void md_stop_writes(mddev_t *mddev) 4725static void __md_stop_writes(mddev_t *mddev)
4697{ 4726{
4698 if (mddev->sync_thread) { 4727 if (mddev->sync_thread) {
4699 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4728 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4700 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4729 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4701 md_unregister_thread(mddev->sync_thread); 4730 reap_sync_thread(mddev);
4702 mddev->sync_thread = NULL;
4703 } 4731 }
4704 4732
4705 del_timer_sync(&mddev->safemode_timer); 4733 del_timer_sync(&mddev->safemode_timer);
@@ -4713,10 +4741,18 @@ void md_stop_writes(mddev_t *mddev)
4713 md_update_sb(mddev, 1); 4741 md_update_sb(mddev, 1);
4714 } 4742 }
4715} 4743}
4744
4745void md_stop_writes(mddev_t *mddev)
4746{
4747 mddev_lock(mddev);
4748 __md_stop_writes(mddev);
4749 mddev_unlock(mddev);
4750}
4716EXPORT_SYMBOL_GPL(md_stop_writes); 4751EXPORT_SYMBOL_GPL(md_stop_writes);
4717 4752
4718void md_stop(mddev_t *mddev) 4753void md_stop(mddev_t *mddev)
4719{ 4754{
4755 mddev->ready = 0;
4720 mddev->pers->stop(mddev); 4756 mddev->pers->stop(mddev);
4721 if (mddev->pers->sync_request && mddev->to_remove == NULL) 4757 if (mddev->pers->sync_request && mddev->to_remove == NULL)
4722 mddev->to_remove = &md_redundancy_group; 4758 mddev->to_remove = &md_redundancy_group;
@@ -4736,7 +4772,7 @@ static int md_set_readonly(mddev_t *mddev, int is_open)
4736 goto out; 4772 goto out;
4737 } 4773 }
4738 if (mddev->pers) { 4774 if (mddev->pers) {
4739 md_stop_writes(mddev); 4775 __md_stop_writes(mddev);
4740 4776
4741 err = -ENXIO; 4777 err = -ENXIO;
4742 if (mddev->ro==1) 4778 if (mddev->ro==1)
@@ -4773,7 +4809,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4773 if (mddev->ro) 4809 if (mddev->ro)
4774 set_disk_ro(disk, 0); 4810 set_disk_ro(disk, 0);
4775 4811
4776 md_stop_writes(mddev); 4812 __md_stop_writes(mddev);
4777 md_stop(mddev); 4813 md_stop(mddev);
4778 mddev->queue->merge_bvec_fn = NULL; 4814 mddev->queue->merge_bvec_fn = NULL;
4779 mddev->queue->unplug_fn = NULL; 4815 mddev->queue->unplug_fn = NULL;
@@ -5151,9 +5187,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5151 /* set saved_raid_disk if appropriate */ 5187 /* set saved_raid_disk if appropriate */
5152 if (!mddev->persistent) { 5188 if (!mddev->persistent) {
5153 if (info->state & (1<<MD_DISK_SYNC) && 5189 if (info->state & (1<<MD_DISK_SYNC) &&
5154 info->raid_disk < mddev->raid_disks) 5190 info->raid_disk < mddev->raid_disks) {
5155 rdev->raid_disk = info->raid_disk; 5191 rdev->raid_disk = info->raid_disk;
5156 else 5192 set_bit(In_sync, &rdev->flags);
5193 } else
5157 rdev->raid_disk = -1; 5194 rdev->raid_disk = -1;
5158 } else 5195 } else
5159 super_types[mddev->major_version]. 5196 super_types[mddev->major_version].
@@ -5230,7 +5267,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5230 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5267 printk(KERN_INFO "md: nonpersistent superblock ...\n");
5231 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5268 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5232 } else 5269 } else
5233 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5270 rdev->sb_start = calc_dev_sboffset(rdev);
5234 rdev->sectors = rdev->sb_start; 5271 rdev->sectors = rdev->sb_start;
5235 5272
5236 err = bind_rdev_to_array(rdev, mddev); 5273 err = bind_rdev_to_array(rdev, mddev);
@@ -5297,7 +5334,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
5297 } 5334 }
5298 5335
5299 if (mddev->persistent) 5336 if (mddev->persistent)
5300 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5337 rdev->sb_start = calc_dev_sboffset(rdev);
5301 else 5338 else
5302 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5339 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5303 5340
@@ -5510,7 +5547,6 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
5510 * sb_start or, if that is <data_offset, it must fit before the size 5547 * sb_start or, if that is <data_offset, it must fit before the size
5511 * of each device. If num_sectors is zero, we find the largest size 5548 * of each device. If num_sectors is zero, we find the largest size
5512 * that fits. 5549 * that fits.
5513
5514 */ 5550 */
5515 if (mddev->sync_thread) 5551 if (mddev->sync_thread)
5516 return -EBUSY; 5552 return -EBUSY;
@@ -5547,6 +5583,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
5547 mddev->delta_disks = raid_disks - mddev->raid_disks; 5583 mddev->delta_disks = raid_disks - mddev->raid_disks;
5548 5584
5549 rv = mddev->pers->check_reshape(mddev); 5585 rv = mddev->pers->check_reshape(mddev);
5586 if (rv < 0)
5587 mddev->delta_disks = 0;
5550 return rv; 5588 return rv;
5551} 5589}
5552 5590
@@ -6033,7 +6071,8 @@ static int md_thread(void * arg)
6033 || kthread_should_stop(), 6071 || kthread_should_stop(),
6034 thread->timeout); 6072 thread->timeout);
6035 6073
6036 if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags)) 6074 clear_bit(THREAD_WAKEUP, &thread->flags);
6075 if (!kthread_should_stop())
6037 thread->run(thread->mddev); 6076 thread->run(thread->mddev);
6038 } 6077 }
6039 6078
@@ -6799,7 +6838,7 @@ void md_do_sync(mddev_t *mddev)
6799 desc, mdname(mddev)); 6838 desc, mdname(mddev));
6800 mddev->curr_resync = j; 6839 mddev->curr_resync = j;
6801 } 6840 }
6802 mddev->curr_resync_completed = mddev->curr_resync; 6841 mddev->curr_resync_completed = j;
6803 6842
6804 while (j < max_sectors) { 6843 while (j < max_sectors) {
6805 sector_t sectors; 6844 sector_t sectors;
@@ -6817,8 +6856,7 @@ void md_do_sync(mddev_t *mddev)
6817 md_unplug(mddev); 6856 md_unplug(mddev);
6818 wait_event(mddev->recovery_wait, 6857 wait_event(mddev->recovery_wait,
6819 atomic_read(&mddev->recovery_active) == 0); 6858 atomic_read(&mddev->recovery_active) == 0);
6820 mddev->curr_resync_completed = 6859 mddev->curr_resync_completed = j;
6821 mddev->curr_resync;
6822 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6860 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6823 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6861 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6824 } 6862 }
@@ -6954,9 +6992,6 @@ void md_do_sync(mddev_t *mddev)
6954 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6992 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6955 mddev->resync_min = mddev->curr_resync_completed; 6993 mddev->resync_min = mddev->curr_resync_completed;
6956 mddev->curr_resync = 0; 6994 mddev->curr_resync = 0;
6957 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6958 mddev->curr_resync_completed = 0;
6959 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6960 wake_up(&resync_wait); 6995 wake_up(&resync_wait);
6961 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 6996 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
6962 md_wakeup_thread(mddev->thread); 6997 md_wakeup_thread(mddev->thread);
@@ -6997,7 +7032,7 @@ static int remove_and_add_spares(mddev_t *mddev)
6997 } 7032 }
6998 } 7033 }
6999 7034
7000 if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) { 7035 if (mddev->degraded && !mddev->recovery_disabled) {
7001 list_for_each_entry(rdev, &mddev->disks, same_set) { 7036 list_for_each_entry(rdev, &mddev->disks, same_set) {
7002 if (rdev->raid_disk >= 0 && 7037 if (rdev->raid_disk >= 0 &&
7003 !test_bit(In_sync, &rdev->flags) && 7038 !test_bit(In_sync, &rdev->flags) &&
@@ -7023,6 +7058,45 @@ static int remove_and_add_spares(mddev_t *mddev)
7023 } 7058 }
7024 return spares; 7059 return spares;
7025} 7060}
7061
7062static void reap_sync_thread(mddev_t *mddev)
7063{
7064 mdk_rdev_t *rdev;
7065
7066 /* resync has finished, collect result */
7067 md_unregister_thread(mddev->sync_thread);
7068 mddev->sync_thread = NULL;
7069 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7070 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7071 /* success...*/
7072 /* activate any spares */
7073 if (mddev->pers->spare_active(mddev))
7074 sysfs_notify(&mddev->kobj, NULL,
7075 "degraded");
7076 }
7077 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7078 mddev->pers->finish_reshape)
7079 mddev->pers->finish_reshape(mddev);
7080 md_update_sb(mddev, 1);
7081
7082 /* if array is no-longer degraded, then any saved_raid_disk
7083 * information must be scrapped
7084 */
7085 if (!mddev->degraded)
7086 list_for_each_entry(rdev, &mddev->disks, same_set)
7087 rdev->saved_raid_disk = -1;
7088
7089 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7090 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7091 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7092 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7093 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7094 /* flag recovery needed just to double check */
7095 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7096 sysfs_notify_dirent_safe(mddev->sysfs_action);
7097 md_new_event(mddev);
7098}
7099
7026/* 7100/*
7027 * This routine is regularly called by all per-raid-array threads to 7101 * This routine is regularly called by all per-raid-array threads to
7028 * deal with generic issues like resync and super-block update. 7102 * deal with generic issues like resync and super-block update.
@@ -7047,9 +7121,6 @@ static int remove_and_add_spares(mddev_t *mddev)
7047 */ 7121 */
7048void md_check_recovery(mddev_t *mddev) 7122void md_check_recovery(mddev_t *mddev)
7049{ 7123{
7050 mdk_rdev_t *rdev;
7051
7052
7053 if (mddev->bitmap) 7124 if (mddev->bitmap)
7054 bitmap_daemon_work(mddev); 7125 bitmap_daemon_work(mddev);
7055 7126
@@ -7084,7 +7155,20 @@ void md_check_recovery(mddev_t *mddev)
7084 /* Only thing we do on a ro array is remove 7155 /* Only thing we do on a ro array is remove
7085 * failed devices. 7156 * failed devices.
7086 */ 7157 */
7087 remove_and_add_spares(mddev); 7158 mdk_rdev_t *rdev;
7159 list_for_each_entry(rdev, &mddev->disks, same_set)
7160 if (rdev->raid_disk >= 0 &&
7161 !test_bit(Blocked, &rdev->flags) &&
7162 test_bit(Faulty, &rdev->flags) &&
7163 atomic_read(&rdev->nr_pending)==0) {
7164 if (mddev->pers->hot_remove_disk(
7165 mddev, rdev->raid_disk)==0) {
7166 char nm[20];
7167 sprintf(nm,"rd%d", rdev->raid_disk);
7168 sysfs_remove_link(&mddev->kobj, nm);
7169 rdev->raid_disk = -1;
7170 }
7171 }
7088 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7172 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7089 goto unlock; 7173 goto unlock;
7090 } 7174 }
@@ -7117,34 +7201,7 @@ void md_check_recovery(mddev_t *mddev)
7117 goto unlock; 7201 goto unlock;
7118 } 7202 }
7119 if (mddev->sync_thread) { 7203 if (mddev->sync_thread) {
7120 /* resync has finished, collect result */ 7204 reap_sync_thread(mddev);
7121 md_unregister_thread(mddev->sync_thread);
7122 mddev->sync_thread = NULL;
7123 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7124 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7125 /* success...*/
7126 /* activate any spares */
7127 if (mddev->pers->spare_active(mddev))
7128 sysfs_notify(&mddev->kobj, NULL,
7129 "degraded");
7130 }
7131 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7132 mddev->pers->finish_reshape)
7133 mddev->pers->finish_reshape(mddev);
7134 md_update_sb(mddev, 1);
7135
7136 /* if array is no-longer degraded, then any saved_raid_disk
7137 * information must be scrapped
7138 */
7139 if (!mddev->degraded)
7140 list_for_each_entry(rdev, &mddev->disks, same_set)
7141 rdev->saved_raid_disk = -1;
7142
7143 mddev->recovery = 0;
7144 /* flag recovery needed just to double check */
7145 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7146 sysfs_notify_dirent_safe(mddev->sysfs_action);
7147 md_new_event(mddev);
7148 goto unlock; 7205 goto unlock;
7149 } 7206 }
7150 /* Set RUNNING before clearing NEEDED to avoid 7207 /* Set RUNNING before clearing NEEDED to avoid
@@ -7202,7 +7259,11 @@ void md_check_recovery(mddev_t *mddev)
7202 " thread...\n", 7259 " thread...\n",
7203 mdname(mddev)); 7260 mdname(mddev));
7204 /* leave the spares where they are, it shouldn't hurt */ 7261 /* leave the spares where they are, it shouldn't hurt */
7205 mddev->recovery = 0; 7262 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7263 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7264 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7265 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7266 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7206 } else 7267 } else
7207 md_wakeup_thread(mddev->sync_thread); 7268 md_wakeup_thread(mddev->sync_thread);
7208 sysfs_notify_dirent_safe(mddev->sysfs_action); 7269 sysfs_notify_dirent_safe(mddev->sysfs_action);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d05bab55df4e..7e90b8593b2a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -60,6 +60,12 @@ struct mdk_rdev_s
60 mddev_t *mddev; /* RAID array if running */ 60 mddev_t *mddev; /* RAID array if running */
61 int last_events; /* IO event timestamp */ 61 int last_events; /* IO event timestamp */
62 62
63 /*
64 * If meta_bdev is non-NULL, it means that a separate device is
65 * being used to store the metadata (superblock/bitmap) which
66 * would otherwise be contained on the same device as the data (bdev).
67 */
68 struct block_device *meta_bdev;
63 struct block_device *bdev; /* block device handle */ 69 struct block_device *bdev; /* block device handle */
64 70
65 struct page *sb_page; 71 struct page *sb_page;
@@ -87,8 +93,6 @@ struct mdk_rdev_s
87#define Faulty 1 /* device is known to have a fault */ 93#define Faulty 1 /* device is known to have a fault */
88#define In_sync 2 /* device is in_sync with rest of array */ 94#define In_sync 2 /* device is in_sync with rest of array */
89#define WriteMostly 4 /* Avoid reading if at all possible */ 95#define WriteMostly 4 /* Avoid reading if at all possible */
90#define AllReserved 6 /* If whole device is reserved for
91 * one array */
92#define AutoDetected 7 /* added by auto-detect */ 96#define AutoDetected 7 /* added by auto-detect */
 93#define Blocked 8 /* An error occurred on an externally 97#define Blocked 8 /* An error occurred on an externally
94 * managed array, don't allow writes 98 * managed array, don't allow writes
@@ -148,7 +152,8 @@ struct mddev_s
148 * are happening, so run/ 152 * are happening, so run/
149 * takeover/stop are not safe 153 * takeover/stop are not safe
150 */ 154 */
151 155 int ready; /* See when safe to pass
156 * IO requests down */
152 struct gendisk *gendisk; 157 struct gendisk *gendisk;
153 158
154 struct kobject kobj; 159 struct kobject kobj;
@@ -497,8 +502,8 @@ extern void md_flush_request(mddev_t *mddev, struct bio *bio);
497extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 502extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
498 sector_t sector, int size, struct page *page); 503 sector_t sector, int size, struct page *page);
499extern void md_super_wait(mddev_t *mddev); 504extern void md_super_wait(mddev_t *mddev);
500extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, 505extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
501 struct page *page, int rw); 506 struct page *page, int rw, bool metadata_op);
502extern void md_do_sync(mddev_t *mddev); 507extern void md_do_sync(mddev_t *mddev);
503extern void md_new_event(mddev_t *mddev); 508extern void md_new_event(mddev_t *mddev);
504extern int md_allow_write(mddev_t *mddev); 509extern int md_allow_write(mddev_t *mddev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a39f4c355e55..637a96855edb 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -179,6 +179,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
179 rdev1->new_raid_disk = j; 179 rdev1->new_raid_disk = j;
180 } 180 }
181 181
182 if (mddev->level == 1) {
 183 /* taking over a raid1 array-
184 * we have only one active disk
185 */
186 j = 0;
187 rdev1->new_raid_disk = j;
188 }
189
182 if (j < 0 || j >= mddev->raid_disks) { 190 if (j < 0 || j >= mddev->raid_disks) {
183 printk(KERN_ERR "md/raid0:%s: bad disk number %d - " 191 printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
184 "aborting!\n", mdname(mddev), j); 192 "aborting!\n", mdname(mddev), j);
@@ -644,12 +652,38 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
644 return priv_conf; 652 return priv_conf;
645} 653}
646 654
655static void *raid0_takeover_raid1(mddev_t *mddev)
656{
657 raid0_conf_t *priv_conf;
658
659 /* Check layout:
660 * - (N - 1) mirror drives must be already faulty
661 */
662 if ((mddev->raid_disks - 1) != mddev->degraded) {
663 printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
664 mdname(mddev));
665 return ERR_PTR(-EINVAL);
666 }
667
668 /* Set new parameters */
669 mddev->new_level = 0;
670 mddev->new_layout = 0;
671 mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
672 mddev->delta_disks = 1 - mddev->raid_disks;
673 /* make sure it will be not marked as dirty */
674 mddev->recovery_cp = MaxSector;
675
676 create_strip_zones(mddev, &priv_conf);
677 return priv_conf;
678}
679
647static void *raid0_takeover(mddev_t *mddev) 680static void *raid0_takeover(mddev_t *mddev)
648{ 681{
649 /* raid0 can take over: 682 /* raid0 can take over:
650 * raid4 - if all data disks are active. 683 * raid4 - if all data disks are active.
651 * raid5 - providing it is Raid4 layout and one disk is faulty 684 * raid5 - providing it is Raid4 layout and one disk is faulty
652 * raid10 - assuming we have all necessary active disks 685 * raid10 - assuming we have all necessary active disks
 686 * raid1 - with (N - 1) mirror drives faulty
653 */ 687 */
654 if (mddev->level == 4) 688 if (mddev->level == 4)
655 return raid0_takeover_raid45(mddev); 689 return raid0_takeover_raid45(mddev);
@@ -665,6 +699,12 @@ static void *raid0_takeover(mddev_t *mddev)
665 if (mddev->level == 10) 699 if (mddev->level == 10)
666 return raid0_takeover_raid10(mddev); 700 return raid0_takeover_raid10(mddev);
667 701
702 if (mddev->level == 1)
703 return raid0_takeover_raid1(mddev);
704
705 printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
706 mddev->level);
707
668 return ERR_PTR(-EINVAL); 708 return ERR_PTR(-EINVAL);
669} 709}
670 710
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 845cf95b612c..a23ffa397ba9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1027,8 +1027,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1027 } else 1027 } else
1028 set_bit(Faulty, &rdev->flags); 1028 set_bit(Faulty, &rdev->flags);
1029 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1029 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1030 printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n" 1030 printk(KERN_ALERT
1031 KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n", 1031 "md/raid1:%s: Disk failure on %s, disabling device.\n"
1032 "md/raid1:%s: Operation continuing on %d devices.\n",
1032 mdname(mddev), bdevname(rdev->bdev, b), 1033 mdname(mddev), bdevname(rdev->bdev, b),
1033 mdname(mddev), conf->raid_disks - mddev->degraded); 1034 mdname(mddev), conf->raid_disks - mddev->degraded);
1034} 1035}
@@ -1364,10 +1365,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1364 */ 1365 */
1365 rdev = conf->mirrors[d].rdev; 1366 rdev = conf->mirrors[d].rdev;
1366 if (sync_page_io(rdev, 1367 if (sync_page_io(rdev,
1367 sect + rdev->data_offset, 1368 sect,
1368 s<<9, 1369 s<<9,
1369 bio->bi_io_vec[idx].bv_page, 1370 bio->bi_io_vec[idx].bv_page,
1370 READ)) { 1371 READ, false)) {
1371 success = 1; 1372 success = 1;
1372 break; 1373 break;
1373 } 1374 }
@@ -1390,10 +1391,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1390 rdev = conf->mirrors[d].rdev; 1391 rdev = conf->mirrors[d].rdev;
1391 atomic_add(s, &rdev->corrected_errors); 1392 atomic_add(s, &rdev->corrected_errors);
1392 if (sync_page_io(rdev, 1393 if (sync_page_io(rdev,
1393 sect + rdev->data_offset, 1394 sect,
1394 s<<9, 1395 s<<9,
1395 bio->bi_io_vec[idx].bv_page, 1396 bio->bi_io_vec[idx].bv_page,
1396 WRITE) == 0) 1397 WRITE, false) == 0)
1397 md_error(mddev, rdev); 1398 md_error(mddev, rdev);
1398 } 1399 }
1399 d = start; 1400 d = start;
@@ -1405,10 +1406,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1405 continue; 1406 continue;
1406 rdev = conf->mirrors[d].rdev; 1407 rdev = conf->mirrors[d].rdev;
1407 if (sync_page_io(rdev, 1408 if (sync_page_io(rdev,
1408 sect + rdev->data_offset, 1409 sect,
1409 s<<9, 1410 s<<9,
1410 bio->bi_io_vec[idx].bv_page, 1411 bio->bi_io_vec[idx].bv_page,
1411 READ) == 0) 1412 READ, false) == 0)
1412 md_error(mddev, rdev); 1413 md_error(mddev, rdev);
1413 } 1414 }
1414 } else { 1415 } else {
@@ -1488,10 +1489,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
1488 rdev = conf->mirrors[d].rdev; 1489 rdev = conf->mirrors[d].rdev;
1489 if (rdev && 1490 if (rdev &&
1490 test_bit(In_sync, &rdev->flags) && 1491 test_bit(In_sync, &rdev->flags) &&
1491 sync_page_io(rdev, 1492 sync_page_io(rdev, sect, s<<9,
1492 sect + rdev->data_offset, 1493 conf->tmppage, READ, false))
1493 s<<9,
1494 conf->tmppage, READ))
1495 success = 1; 1494 success = 1;
1496 else { 1495 else {
1497 d++; 1496 d++;
@@ -1514,9 +1513,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
1514 rdev = conf->mirrors[d].rdev; 1513 rdev = conf->mirrors[d].rdev;
1515 if (rdev && 1514 if (rdev &&
1516 test_bit(In_sync, &rdev->flags)) { 1515 test_bit(In_sync, &rdev->flags)) {
1517 if (sync_page_io(rdev, 1516 if (sync_page_io(rdev, sect, s<<9,
1518 sect + rdev->data_offset, 1517 conf->tmppage, WRITE, false)
1519 s<<9, conf->tmppage, WRITE)
1520 == 0) 1518 == 0)
1521 /* Well, this device is dead */ 1519 /* Well, this device is dead */
1522 md_error(mddev, rdev); 1520 md_error(mddev, rdev);
@@ -1531,9 +1529,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
1531 rdev = conf->mirrors[d].rdev; 1529 rdev = conf->mirrors[d].rdev;
1532 if (rdev && 1530 if (rdev &&
1533 test_bit(In_sync, &rdev->flags)) { 1531 test_bit(In_sync, &rdev->flags)) {
1534 if (sync_page_io(rdev, 1532 if (sync_page_io(rdev, sect, s<<9,
1535 sect + rdev->data_offset, 1533 conf->tmppage, READ, false)
1536 s<<9, conf->tmppage, READ)
1537 == 0) 1534 == 0)
1538 /* Well, this device is dead */ 1535 /* Well, this device is dead */
1539 md_error(mddev, rdev); 1536 md_error(mddev, rdev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0641674827f0..3b607b28741b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1051,8 +1051,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1051 } 1051 }
1052 set_bit(Faulty, &rdev->flags); 1052 set_bit(Faulty, &rdev->flags);
1053 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1053 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1054 printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n" 1054 printk(KERN_ALERT
1055 KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n", 1055 "md/raid10:%s: Disk failure on %s, disabling device.\n"
1056 "md/raid10:%s: Operation continuing on %d devices.\n",
1056 mdname(mddev), bdevname(rdev->bdev, b), 1057 mdname(mddev), bdevname(rdev->bdev, b),
1057 mdname(mddev), conf->raid_disks - mddev->degraded); 1058 mdname(mddev), conf->raid_disks - mddev->degraded);
1058} 1059}
@@ -1559,9 +1560,9 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1559 rcu_read_unlock(); 1560 rcu_read_unlock();
1560 success = sync_page_io(rdev, 1561 success = sync_page_io(rdev,
1561 r10_bio->devs[sl].addr + 1562 r10_bio->devs[sl].addr +
1562 sect + rdev->data_offset, 1563 sect,
1563 s<<9, 1564 s<<9,
1564 conf->tmppage, READ); 1565 conf->tmppage, READ, false);
1565 rdev_dec_pending(rdev, mddev); 1566 rdev_dec_pending(rdev, mddev);
1566 rcu_read_lock(); 1567 rcu_read_lock();
1567 if (success) 1568 if (success)
@@ -1598,8 +1599,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1598 atomic_add(s, &rdev->corrected_errors); 1599 atomic_add(s, &rdev->corrected_errors);
1599 if (sync_page_io(rdev, 1600 if (sync_page_io(rdev,
1600 r10_bio->devs[sl].addr + 1601 r10_bio->devs[sl].addr +
1601 sect + rdev->data_offset, 1602 sect,
1602 s<<9, conf->tmppage, WRITE) 1603 s<<9, conf->tmppage, WRITE, false)
1603 == 0) { 1604 == 0) {
1604 /* Well, this device is dead */ 1605 /* Well, this device is dead */
1605 printk(KERN_NOTICE 1606 printk(KERN_NOTICE
@@ -1635,9 +1636,9 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1635 rcu_read_unlock(); 1636 rcu_read_unlock();
1636 if (sync_page_io(rdev, 1637 if (sync_page_io(rdev,
1637 r10_bio->devs[sl].addr + 1638 r10_bio->devs[sl].addr +
1638 sect + rdev->data_offset, 1639 sect,
1639 s<<9, conf->tmppage, 1640 s<<9, conf->tmppage,
1640 READ) == 0) { 1641 READ, false) == 0) {
1641 /* Well, this device is dead */ 1642 /* Well, this device is dead */
1642 printk(KERN_NOTICE 1643 printk(KERN_NOTICE
1643 "md/raid10:%s: unable to read back " 1644 "md/raid10:%s: unable to read back "
@@ -2462,11 +2463,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
2462 mddev->recovery_cp = MaxSector; 2463 mddev->recovery_cp = MaxSector;
2463 2464
2464 conf = setup_conf(mddev); 2465 conf = setup_conf(mddev);
2465 if (!IS_ERR(conf)) 2466 if (!IS_ERR(conf)) {
2466 list_for_each_entry(rdev, &mddev->disks, same_set) 2467 list_for_each_entry(rdev, &mddev->disks, same_set)
2467 if (rdev->raid_disk >= 0) 2468 if (rdev->raid_disk >= 0)
2468 rdev->new_raid_disk = rdev->raid_disk * 2; 2469 rdev->new_raid_disk = rdev->raid_disk * 2;
2469 2470 conf->barrier = 1;
2471 }
2472
2470 return conf; 2473 return conf;
2471} 2474}
2472 2475
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dc574f303f8b..702812824195 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1721,7 +1721,6 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1721 set_bit(Faulty, &rdev->flags); 1721 set_bit(Faulty, &rdev->flags);
1722 printk(KERN_ALERT 1722 printk(KERN_ALERT
1723 "md/raid:%s: Disk failure on %s, disabling device.\n" 1723 "md/raid:%s: Disk failure on %s, disabling device.\n"
1724 KERN_ALERT
1725 "md/raid:%s: Operation continuing on %d devices.\n", 1724 "md/raid:%s: Operation continuing on %d devices.\n",
1726 mdname(mddev), 1725 mdname(mddev),
1727 bdevname(rdev->bdev, b), 1726 bdevname(rdev->bdev, b),
@@ -4237,7 +4236,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
4237 wait_event(conf->wait_for_overlap, 4236 wait_event(conf->wait_for_overlap,
4238 atomic_read(&conf->reshape_stripes)==0); 4237 atomic_read(&conf->reshape_stripes)==0);
4239 mddev->reshape_position = conf->reshape_progress; 4238 mddev->reshape_position = conf->reshape_progress;
4240 mddev->curr_resync_completed = mddev->curr_resync; 4239 mddev->curr_resync_completed = sector_nr;
4241 conf->reshape_checkpoint = jiffies; 4240 conf->reshape_checkpoint = jiffies;
4242 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4241 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4243 md_wakeup_thread(mddev->thread); 4242 md_wakeup_thread(mddev->thread);
@@ -4338,7 +4337,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
4338 wait_event(conf->wait_for_overlap, 4337 wait_event(conf->wait_for_overlap,
4339 atomic_read(&conf->reshape_stripes) == 0); 4338 atomic_read(&conf->reshape_stripes) == 0);
4340 mddev->reshape_position = conf->reshape_progress; 4339 mddev->reshape_position = conf->reshape_progress;
4341 mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors; 4340 mddev->curr_resync_completed = sector_nr;
4342 conf->reshape_checkpoint = jiffies; 4341 conf->reshape_checkpoint = jiffies;
4343 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4342 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4344 md_wakeup_thread(mddev->thread); 4343 md_wakeup_thread(mddev->thread);
@@ -5339,7 +5338,7 @@ static int raid5_spare_active(mddev_t *mddev)
5339 && !test_bit(Faulty, &tmp->rdev->flags) 5338 && !test_bit(Faulty, &tmp->rdev->flags)
5340 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 5339 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5341 count++; 5340 count++;
5342 sysfs_notify_dirent(tmp->rdev->sysfs_state); 5341 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
5343 } 5342 }
5344 } 5343 }
5345 spin_lock_irqsave(&conf->device_lock, flags); 5344 spin_lock_irqsave(&conf->device_lock, flags);
@@ -5518,7 +5517,6 @@ static int raid5_start_reshape(mddev_t *mddev)
5518 raid5_conf_t *conf = mddev->private; 5517 raid5_conf_t *conf = mddev->private;
5519 mdk_rdev_t *rdev; 5518 mdk_rdev_t *rdev;
5520 int spares = 0; 5519 int spares = 0;
5521 int added_devices = 0;
5522 unsigned long flags; 5520 unsigned long flags;
5523 5521
5524 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5522 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5528,8 +5526,8 @@ static int raid5_start_reshape(mddev_t *mddev)
5528 return -ENOSPC; 5526 return -ENOSPC;
5529 5527
5530 list_for_each_entry(rdev, &mddev->disks, same_set) 5528 list_for_each_entry(rdev, &mddev->disks, same_set)
5531 if (rdev->raid_disk < 0 && 5529 if (!test_bit(In_sync, &rdev->flags)
5532 !test_bit(Faulty, &rdev->flags)) 5530 && !test_bit(Faulty, &rdev->flags))
5533 spares++; 5531 spares++;
5534 5532
5535 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 5533 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5572,29 +5570,35 @@ static int raid5_start_reshape(mddev_t *mddev)
5572 * to correctly record the "partially reconstructed" state of 5570 * to correctly record the "partially reconstructed" state of
5573 * such devices during the reshape and confusion could result. 5571 * such devices during the reshape and confusion could result.
5574 */ 5572 */
5575 if (mddev->delta_disks >= 0) 5573 if (mddev->delta_disks >= 0) {
5576 list_for_each_entry(rdev, &mddev->disks, same_set) 5574 int added_devices = 0;
5577 if (rdev->raid_disk < 0 && 5575 list_for_each_entry(rdev, &mddev->disks, same_set)
5578 !test_bit(Faulty, &rdev->flags)) { 5576 if (rdev->raid_disk < 0 &&
5579 if (raid5_add_disk(mddev, rdev) == 0) { 5577 !test_bit(Faulty, &rdev->flags)) {
5580 char nm[20]; 5578 if (raid5_add_disk(mddev, rdev) == 0) {
5581 if (rdev->raid_disk >= conf->previous_raid_disks) { 5579 char nm[20];
5582 set_bit(In_sync, &rdev->flags); 5580 if (rdev->raid_disk
5583 added_devices++; 5581 >= conf->previous_raid_disks) {
5584 } else 5582 set_bit(In_sync, &rdev->flags);
5585 rdev->recovery_offset = 0; 5583 added_devices++;
5586 sprintf(nm, "rd%d", rdev->raid_disk); 5584 } else
5587 if (sysfs_create_link(&mddev->kobj, 5585 rdev->recovery_offset = 0;
5588 &rdev->kobj, nm)) 5586 sprintf(nm, "rd%d", rdev->raid_disk);
5589 /* Failure here is OK */; 5587 if (sysfs_create_link(&mddev->kobj,
5590 } else 5588 &rdev->kobj, nm))
5591 break; 5589 /* Failure here is OK */;
5592 } 5590 }
5591 } else if (rdev->raid_disk >= conf->previous_raid_disks
5592 && !test_bit(Faulty, &rdev->flags)) {
5593 /* This is a spare that was manually added */
5594 set_bit(In_sync, &rdev->flags);
5595 added_devices++;
5596 }
5593 5597
5594 /* When a reshape changes the number of devices, ->degraded 5598 /* When a reshape changes the number of devices,
5595 * is measured against the larger of the pre and post number of 5599 * ->degraded is measured against the larger of the
5596 * devices.*/ 5600 * pre and post number of devices.
5597 if (mddev->delta_disks > 0) { 5601 */
5598 spin_lock_irqsave(&conf->device_lock, flags); 5602 spin_lock_irqsave(&conf->device_lock, flags);
5599 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) 5603 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5600 - added_devices; 5604 - added_devices;
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 982f000a57ff..9f47e383c57a 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -452,7 +452,7 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
452 INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device)); 452 INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
453 dev->ext = ext; 453 dev->ext = ext;
454 454
455 mutex_init(&dev->lock); 455 mutex_init(&dev->v4l2_lock);
456 spin_lock_init(&dev->int_slock); 456 spin_lock_init(&dev->int_slock);
457 spin_lock_init(&dev->slock); 457 spin_lock_init(&dev->slock);
458 458
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index e3fedc60fe77..1bd3dd762c6b 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -15,18 +15,15 @@ int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit)
15 } 15 }
16 16
17 /* is it free? */ 17 /* is it free? */
18 mutex_lock(&dev->lock);
19 if (vv->resources & bit) { 18 if (vv->resources & bit) {
20 DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit)); 19 DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
21 /* no, someone else uses it */ 20 /* no, someone else uses it */
22 mutex_unlock(&dev->lock);
23 return 0; 21 return 0;
24 } 22 }
25 /* it's free, grab it */ 23 /* it's free, grab it */
26 fh->resources |= bit; 24 fh->resources |= bit;
27 vv->resources |= bit; 25 vv->resources |= bit;
28 DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources)); 26 DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
29 mutex_unlock(&dev->lock);
30 return 1; 27 return 1;
31} 28}
32 29
@@ -37,11 +34,9 @@ void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits)
37 34
38 BUG_ON((fh->resources & bits) != bits); 35 BUG_ON((fh->resources & bits) != bits);
39 36
40 mutex_lock(&dev->lock);
41 fh->resources &= ~bits; 37 fh->resources &= ~bits;
42 vv->resources &= ~bits; 38 vv->resources &= ~bits;
43 DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources)); 39 DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
44 mutex_unlock(&dev->lock);
45} 40}
46 41
47 42
@@ -396,7 +391,7 @@ static const struct v4l2_file_operations video_fops =
396 .write = fops_write, 391 .write = fops_write,
397 .poll = fops_poll, 392 .poll = fops_poll,
398 .mmap = fops_mmap, 393 .mmap = fops_mmap,
399 .ioctl = video_ioctl2, 394 .unlocked_ioctl = video_ioctl2,
400}; 395};
401 396
402static void vv_callback(struct saa7146_dev *dev, unsigned long status) 397static void vv_callback(struct saa7146_dev *dev, unsigned long status)
@@ -505,6 +500,7 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
505 vfd->fops = &video_fops; 500 vfd->fops = &video_fops;
506 vfd->ioctl_ops = &dev->ext_vv_data->ops; 501 vfd->ioctl_ops = &dev->ext_vv_data->ops;
507 vfd->release = video_device_release; 502 vfd->release = video_device_release;
503 vfd->lock = &dev->v4l2_lock;
508 vfd->tvnorms = 0; 504 vfd->tvnorms = 0;
509 for (i = 0; i < dev->ext_vv_data->num_stds; i++) 505 for (i = 0; i < dev->ext_vv_data->num_stds; i++)
510 vfd->tvnorms |= dev->ext_vv_data->stds[i].id; 506 vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 2d4533ab22b7..afe85801d6ca 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -412,7 +412,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
412 V4L2_BUF_TYPE_VBI_CAPTURE, 412 V4L2_BUF_TYPE_VBI_CAPTURE,
413 V4L2_FIELD_SEQ_TB, // FIXME: does this really work? 413 V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
414 sizeof(struct saa7146_buf), 414 sizeof(struct saa7146_buf),
415 file, NULL); 415 file, &dev->v4l2_lock);
416 416
417 init_timer(&fh->vbi_read_timeout); 417 init_timer(&fh->vbi_read_timeout);
418 fh->vbi_read_timeout.function = vbi_read_timeout; 418 fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 0ac5c619aecf..9aafa4e969a8 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -553,8 +553,6 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
553 } 553 }
554 } 554 }
555 555
556 mutex_lock(&dev->lock);
557
558 /* ok, accept it */ 556 /* ok, accept it */
559 vv->ov_fb = *fb; 557 vv->ov_fb = *fb;
560 vv->ov_fmt = fmt; 558 vv->ov_fmt = fmt;
@@ -563,8 +561,6 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
563 vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8; 561 vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8;
564 DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline)); 562 DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline));
565 } 563 }
566
567 mutex_unlock(&dev->lock);
568 return 0; 564 return 0;
569} 565}
570 566
@@ -649,8 +645,6 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
649 return -EINVAL; 645 return -EINVAL;
650 } 646 }
651 647
652 mutex_lock(&dev->lock);
653
654 switch (ctrl->type) { 648 switch (ctrl->type) {
655 case V4L2_CTRL_TYPE_BOOLEAN: 649 case V4L2_CTRL_TYPE_BOOLEAN:
656 case V4L2_CTRL_TYPE_MENU: 650 case V4L2_CTRL_TYPE_MENU:
@@ -693,7 +687,6 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
693 /* fixme: we can support changing VFLIP and HFLIP here... */ 687 /* fixme: we can support changing VFLIP and HFLIP here... */
694 if (IS_CAPTURE_ACTIVE(fh) != 0) { 688 if (IS_CAPTURE_ACTIVE(fh) != 0) {
695 DEB_D(("V4L2_CID_HFLIP while active capture.\n")); 689 DEB_D(("V4L2_CID_HFLIP while active capture.\n"));
696 mutex_unlock(&dev->lock);
697 return -EBUSY; 690 return -EBUSY;
698 } 691 }
699 vv->hflip = c->value; 692 vv->hflip = c->value;
@@ -701,16 +694,13 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
701 case V4L2_CID_VFLIP: 694 case V4L2_CID_VFLIP:
702 if (IS_CAPTURE_ACTIVE(fh) != 0) { 695 if (IS_CAPTURE_ACTIVE(fh) != 0) {
703 DEB_D(("V4L2_CID_VFLIP while active capture.\n")); 696 DEB_D(("V4L2_CID_VFLIP while active capture.\n"));
704 mutex_unlock(&dev->lock);
705 return -EBUSY; 697 return -EBUSY;
706 } 698 }
707 vv->vflip = c->value; 699 vv->vflip = c->value;
708 break; 700 break;
709 default: 701 default:
710 mutex_unlock(&dev->lock);
711 return -EINVAL; 702 return -EINVAL;
712 } 703 }
713 mutex_unlock(&dev->lock);
714 704
715 if (IS_OVERLAY_ACTIVE(fh) != 0) { 705 if (IS_OVERLAY_ACTIVE(fh) != 0) {
716 saa7146_stop_preview(fh); 706 saa7146_stop_preview(fh);
@@ -902,22 +892,18 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_f
902 err = vidioc_try_fmt_vid_overlay(file, fh, f); 892 err = vidioc_try_fmt_vid_overlay(file, fh, f);
903 if (0 != err) 893 if (0 != err)
904 return err; 894 return err;
905 mutex_lock(&dev->lock);
906 fh->ov.win = f->fmt.win; 895 fh->ov.win = f->fmt.win;
907 fh->ov.nclips = f->fmt.win.clipcount; 896 fh->ov.nclips = f->fmt.win.clipcount;
908 if (fh->ov.nclips > 16) 897 if (fh->ov.nclips > 16)
909 fh->ov.nclips = 16; 898 fh->ov.nclips = 16;
910 if (copy_from_user(fh->ov.clips, f->fmt.win.clips, 899 if (copy_from_user(fh->ov.clips, f->fmt.win.clips,
911 sizeof(struct v4l2_clip) * fh->ov.nclips)) { 900 sizeof(struct v4l2_clip) * fh->ov.nclips)) {
912 mutex_unlock(&dev->lock);
913 return -EFAULT; 901 return -EFAULT;
914 } 902 }
915 903
916 /* fh->ov.fh is used to indicate that we have valid overlay informations, too */ 904 /* fh->ov.fh is used to indicate that we have valid overlay informations, too */
917 fh->ov.fh = fh; 905 fh->ov.fh = fh;
918 906
919 mutex_unlock(&dev->lock);
920
921 /* check if our current overlay is active */ 907 /* check if our current overlay is active */
922 if (IS_OVERLAY_ACTIVE(fh) != 0) { 908 if (IS_OVERLAY_ACTIVE(fh) != 0) {
923 saa7146_stop_preview(fh); 909 saa7146_stop_preview(fh);
@@ -976,8 +962,6 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
976 } 962 }
977 } 963 }
978 964
979 mutex_lock(&dev->lock);
980
981 for (i = 0; i < dev->ext_vv_data->num_stds; i++) 965 for (i = 0; i < dev->ext_vv_data->num_stds; i++)
982 if (*id & dev->ext_vv_data->stds[i].id) 966 if (*id & dev->ext_vv_data->stds[i].id)
983 break; 967 break;
@@ -988,8 +972,6 @@ static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *id)
988 found = 1; 972 found = 1;
989 } 973 }
990 974
991 mutex_unlock(&dev->lock);
992
993 if (vv->ov_suspend != NULL) { 975 if (vv->ov_suspend != NULL) {
994 saa7146_start_preview(vv->ov_suspend); 976 saa7146_start_preview(vv->ov_suspend);
995 vv->ov_suspend = NULL; 977 vv->ov_suspend = NULL;
@@ -1354,7 +1336,7 @@ static int video_open(struct saa7146_dev *dev, struct file *file)
1354 V4L2_BUF_TYPE_VIDEO_CAPTURE, 1336 V4L2_BUF_TYPE_VIDEO_CAPTURE,
1355 V4L2_FIELD_INTERLACED, 1337 V4L2_FIELD_INTERLACED,
1356 sizeof(struct saa7146_buf), 1338 sizeof(struct saa7146_buf),
1357 file, NULL); 1339 file, &dev->v4l2_lock);
1358 1340
1359 return 0; 1341 return 0;
1360} 1342}
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 78b089526e02..6fc79f15dcbc 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -34,7 +34,7 @@ config MEDIA_TUNER
34config MEDIA_TUNER_CUSTOMISE 34config MEDIA_TUNER_CUSTOMISE
35 bool "Customize analog and hybrid tuner modules to build" 35 bool "Customize analog and hybrid tuner modules to build"
36 depends on MEDIA_TUNER 36 depends on MEDIA_TUNER
37 default y if EMBEDDED 37 default y if EXPERT
38 help 38 help
39 This allows the user to deselect tuner drivers unnecessary 39 This allows the user to deselect tuner drivers unnecessary
40 for their hardware from the build. Use this option with care 40 for their hardware from the build. Use this option with care
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index c9062ceddc71..bc6a67768af1 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -95,8 +95,7 @@ static int tda8295_i2c_bridge(struct dvb_frontend *fe, int close)
95 msleep(20); 95 msleep(20);
96 } else { 96 } else {
97 msg = disable; 97 msg = disable;
98 tuner_i2c_xfer_send(&priv->i2c_props, msg, 1); 98 tuner_i2c_xfer_send_recv(&priv->i2c_props, msg, 1, &msg[1], 1);
99 tuner_i2c_xfer_recv(&priv->i2c_props, &msg[1], 1);
100 99
101 buf[2] = msg[1]; 100 buf[2] = msg[1];
102 buf[2] &= ~0x04; 101 buf[2] &= ~0x04;
@@ -233,19 +232,22 @@ static void tda8290_set_params(struct dvb_frontend *fe,
233 tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2); 232 tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2);
234 } 233 }
235 234
235
236 tda8290_i2c_bridge(fe, 1); 236 tda8290_i2c_bridge(fe, 1);
237 237
238 if (fe->ops.tuner_ops.set_analog_params) 238 if (fe->ops.tuner_ops.set_analog_params)
239 fe->ops.tuner_ops.set_analog_params(fe, params); 239 fe->ops.tuner_ops.set_analog_params(fe, params);
240 240
241 for (i = 0; i < 3; i++) { 241 for (i = 0; i < 3; i++) {
242 tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1); 242 tuner_i2c_xfer_send_recv(&priv->i2c_props,
243 tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1); 243 &addr_pll_stat, 1, &pll_stat, 1);
244 if (pll_stat & 0x80) { 244 if (pll_stat & 0x80) {
245 tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1); 245 tuner_i2c_xfer_send_recv(&priv->i2c_props,
246 tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1); 246 &addr_adc_sat, 1,
247 tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1); 247 &adc_sat, 1);
248 tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1); 248 tuner_i2c_xfer_send_recv(&priv->i2c_props,
249 &addr_agc_stat, 1,
250 &agc_stat, 1);
249 tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat); 251 tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
250 break; 252 break;
251 } else { 253 } else {
@@ -259,20 +261,22 @@ static void tda8290_set_params(struct dvb_frontend *fe,
259 agc_stat, adc_sat, pll_stat & 0x80); 261 agc_stat, adc_sat, pll_stat & 0x80);
260 tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2); 262 tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2);
261 msleep(100); 263 msleep(100);
262 tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1); 264 tuner_i2c_xfer_send_recv(&priv->i2c_props,
263 tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1); 265 &addr_agc_stat, 1, &agc_stat, 1);
264 tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1); 266 tuner_i2c_xfer_send_recv(&priv->i2c_props,
265 tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1); 267 &addr_pll_stat, 1, &pll_stat, 1);
266 if ((agc_stat > 115) || !(pll_stat & 0x80)) { 268 if ((agc_stat > 115) || !(pll_stat & 0x80)) {
267 tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n", 269 tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
268 agc_stat, pll_stat & 0x80); 270 agc_stat, pll_stat & 0x80);
269 if (priv->cfg.agcf) 271 if (priv->cfg.agcf)
270 priv->cfg.agcf(fe); 272 priv->cfg.agcf(fe);
271 msleep(100); 273 msleep(100);
272 tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1); 274 tuner_i2c_xfer_send_recv(&priv->i2c_props,
273 tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1); 275 &addr_agc_stat, 1,
274 tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1); 276 &agc_stat, 1);
275 tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1); 277 tuner_i2c_xfer_send_recv(&priv->i2c_props,
278 &addr_pll_stat, 1,
279 &pll_stat, 1);
276 if((agc_stat > 115) || !(pll_stat & 0x80)) { 280 if((agc_stat > 115) || !(pll_stat & 0x80)) {
277 tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat); 281 tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat);
278 tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2); 282 tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2);
@@ -284,10 +288,12 @@ static void tda8290_set_params(struct dvb_frontend *fe,
284 288
285 /* l/ l' deadlock? */ 289 /* l/ l' deadlock? */
286 if(priv->tda8290_easy_mode & 0x60) { 290 if(priv->tda8290_easy_mode & 0x60) {
287 tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1); 291 tuner_i2c_xfer_send_recv(&priv->i2c_props,
288 tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1); 292 &addr_adc_sat, 1,
289 tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1); 293 &adc_sat, 1);
290 tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1); 294 tuner_i2c_xfer_send_recv(&priv->i2c_props,
295 &addr_pll_stat, 1,
296 &pll_stat, 1);
291 if ((adc_sat > 20) || !(pll_stat & 0x80)) { 297 if ((adc_sat > 20) || !(pll_stat & 0x80)) {
292 tuner_dbg("trying to resolve SECAM L deadlock\n"); 298 tuner_dbg("trying to resolve SECAM L deadlock\n");
293 tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2); 299 tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2);
@@ -307,8 +313,7 @@ static void tda8295_power(struct dvb_frontend *fe, int enable)
307 struct tda8290_priv *priv = fe->analog_demod_priv; 313 struct tda8290_priv *priv = fe->analog_demod_priv;
308 unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */ 314 unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */
309 315
310 tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1); 316 tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
311 tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
312 317
313 if (enable) 318 if (enable)
314 buf[1] = 0x01; 319 buf[1] = 0x01;
@@ -323,8 +328,7 @@ static void tda8295_set_easy_mode(struct dvb_frontend *fe, int enable)
323 struct tda8290_priv *priv = fe->analog_demod_priv; 328 struct tda8290_priv *priv = fe->analog_demod_priv;
324 unsigned char buf[] = { 0x01, 0x00 }; 329 unsigned char buf[] = { 0x01, 0x00 };
325 330
326 tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1); 331 tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
327 tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
328 332
329 if (enable) 333 if (enable)
330 buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */ 334 buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */
@@ -353,8 +357,7 @@ static void tda8295_agc1_out(struct dvb_frontend *fe, int enable)
353 struct tda8290_priv *priv = fe->analog_demod_priv; 357 struct tda8290_priv *priv = fe->analog_demod_priv;
354 unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */ 358 unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */
355 359
356 tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1); 360 tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
357 tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
358 361
359 if (enable) 362 if (enable)
360 buf[1] &= ~0x40; 363 buf[1] &= ~0x40;
@@ -370,10 +373,10 @@ static void tda8295_agc2_out(struct dvb_frontend *fe, int enable)
370 unsigned char set_gpio_cf[] = { 0x44, 0x00 }; 373 unsigned char set_gpio_cf[] = { 0x44, 0x00 };
371 unsigned char set_gpio_val[] = { 0x46, 0x00 }; 374 unsigned char set_gpio_val[] = { 0x46, 0x00 };
372 375
373 tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_cf[0], 1); 376 tuner_i2c_xfer_send_recv(&priv->i2c_props,
374 tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_cf[1], 1); 377 &set_gpio_cf[0], 1, &set_gpio_cf[1], 1);
375 tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_val[0], 1); 378 tuner_i2c_xfer_send_recv(&priv->i2c_props,
376 tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_val[1], 1); 379 &set_gpio_val[0], 1, &set_gpio_val[1], 1);
377 380
378 set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */ 381 set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */
379 382
@@ -392,8 +395,7 @@ static int tda8295_has_signal(struct dvb_frontend *fe)
392 unsigned char hvpll_stat = 0x26; 395 unsigned char hvpll_stat = 0x26;
393 unsigned char ret; 396 unsigned char ret;
394 397
395 tuner_i2c_xfer_send(&priv->i2c_props, &hvpll_stat, 1); 398 tuner_i2c_xfer_send_recv(&priv->i2c_props, &hvpll_stat, 1, &ret, 1);
396 tuner_i2c_xfer_recv(&priv->i2c_props, &ret, 1);
397 return (ret & 0x01) ? 65535 : 0; 399 return (ret & 0x01) ? 65535 : 0;
398} 400}
399 401
@@ -413,8 +415,8 @@ static void tda8295_set_params(struct dvb_frontend *fe,
413 tda8295_power(fe, 1); 415 tda8295_power(fe, 1);
414 tda8295_agc1_out(fe, 1); 416 tda8295_agc1_out(fe, 1);
415 417
416 tuner_i2c_xfer_send(&priv->i2c_props, &blanking_mode[0], 1); 418 tuner_i2c_xfer_send_recv(&priv->i2c_props,
417 tuner_i2c_xfer_recv(&priv->i2c_props, &blanking_mode[1], 1); 419 &blanking_mode[0], 1, &blanking_mode[1], 1);
418 420
419 tda8295_set_video_std(fe); 421 tda8295_set_video_std(fe);
420 422
@@ -447,8 +449,8 @@ static int tda8290_has_signal(struct dvb_frontend *fe)
447 unsigned char i2c_get_afc[1] = { 0x1B }; 449 unsigned char i2c_get_afc[1] = { 0x1B };
448 unsigned char afc = 0; 450 unsigned char afc = 0;
449 451
450 tuner_i2c_xfer_send(&priv->i2c_props, i2c_get_afc, ARRAY_SIZE(i2c_get_afc)); 452 tuner_i2c_xfer_send_recv(&priv->i2c_props,
451 tuner_i2c_xfer_recv(&priv->i2c_props, &afc, 1); 453 i2c_get_afc, ARRAY_SIZE(i2c_get_afc), &afc, 1);
452 return (afc & 0x80)? 65535:0; 454 return (afc & 0x80)? 65535:0;
453} 455}
454 456
@@ -654,20 +656,26 @@ static int tda829x_find_tuner(struct dvb_frontend *fe)
654static int tda8290_probe(struct tuner_i2c_props *i2c_props) 656static int tda8290_probe(struct tuner_i2c_props *i2c_props)
655{ 657{
656#define TDA8290_ID 0x89 658#define TDA8290_ID 0x89
657 unsigned char tda8290_id[] = { 0x1f, 0x00 }; 659 u8 reg = 0x1f, id;
660 struct i2c_msg msg_read[] = {
661 { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
662 { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
663 };
658 664
659 /* detect tda8290 */ 665 /* detect tda8290 */
660 tuner_i2c_xfer_send(i2c_props, &tda8290_id[0], 1); 666 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
661 tuner_i2c_xfer_recv(i2c_props, &tda8290_id[1], 1); 667 printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
668 __func__, reg);
669 return -ENODEV;
670 }
662 671
663 if (tda8290_id[1] == TDA8290_ID) { 672 if (id == TDA8290_ID) {
664 if (debug) 673 if (debug)
665 printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n", 674 printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n",
666 __func__, i2c_adapter_id(i2c_props->adap), 675 __func__, i2c_adapter_id(i2c_props->adap),
667 i2c_props->addr); 676 i2c_props->addr);
668 return 0; 677 return 0;
669 } 678 }
670
671 return -ENODEV; 679 return -ENODEV;
672} 680}
673 681
@@ -675,16 +683,23 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
675{ 683{
676#define TDA8295_ID 0x8a 684#define TDA8295_ID 0x8a
677#define TDA8295C2_ID 0x8b 685#define TDA8295C2_ID 0x8b
678 unsigned char tda8295_id[] = { 0x2f, 0x00 }; 686 u8 reg = 0x2f, id;
687 struct i2c_msg msg_read[] = {
688 { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
689 { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
690 };
679 691
680 /* detect tda8295 */ 692 /* detect tda8290 */
681 tuner_i2c_xfer_send(i2c_props, &tda8295_id[0], 1); 693 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
682 tuner_i2c_xfer_recv(i2c_props, &tda8295_id[1], 1); 694 printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
695 __func__, reg);
696 return -ENODEV;
697 }
683 698
684 if ((tda8295_id[1] & 0xfe) == TDA8295_ID) { 699 if ((id & 0xfe) == TDA8295_ID) {
685 if (debug) 700 if (debug)
686 printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n", 701 printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
687 __func__, (tda8295_id[1] == TDA8295_ID) ? 702 __func__, (id == TDA8295_ID) ?
688 "tda8295c1" : "tda8295c2", 703 "tda8295c1" : "tda8295c2",
689 i2c_adapter_id(i2c_props->adap), 704 i2c_adapter_id(i2c_props->adap),
690 i2c_props->addr); 705 i2c_props->addr);
@@ -740,9 +755,11 @@ struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
740 sizeof(struct analog_demod_ops)); 755 sizeof(struct analog_demod_ops));
741 } 756 }
742 757
743 if ((!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) && 758 if (!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) {
744 (tda829x_find_tuner(fe) < 0)) 759 tda8295_power(fe, 1);
745 goto fail; 760 if (tda829x_find_tuner(fe) < 0)
761 goto fail;
762 }
746 763
747 switch (priv->ver) { 764 switch (priv->ver) {
748 case TDA8290: 765 case TDA8290:
@@ -786,6 +803,8 @@ struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
786 return fe; 803 return fe;
787 804
788fail: 805fail:
806 memset(&fe->ops.analog_ops, 0, sizeof(struct analog_demod_ops));
807
789 tda829x_release(fe); 808 tda829x_release(fe);
790 return NULL; 809 return NULL;
791} 810}
@@ -809,8 +828,8 @@ int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr)
809 int i; 828 int i;
810 829
811 /* rule out tda9887, which would return the same byte repeatedly */ 830 /* rule out tda9887, which would return the same byte repeatedly */
812 tuner_i2c_xfer_send(&i2c_props, soft_reset, 1); 831 tuner_i2c_xfer_send_recv(&i2c_props,
813 tuner_i2c_xfer_recv(&i2c_props, buf, PROBE_BUFFER_SIZE); 832 soft_reset, 1, buf, PROBE_BUFFER_SIZE);
814 for (i = 1; i < PROBE_BUFFER_SIZE; i++) { 833 for (i = 1; i < PROBE_BUFFER_SIZE; i++) {
815 if (buf[i] != buf[0]) 834 if (buf[i] != buf[0])
816 break; 835 break;
@@ -827,13 +846,12 @@ int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr)
827 /* fall back to old probing method */ 846 /* fall back to old probing method */
828 tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2); 847 tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2);
829 tuner_i2c_xfer_send(&i2c_props, soft_reset, 2); 848 tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
830 tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1); 849 tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1);
831 tuner_i2c_xfer_recv(&i2c_props, &data, 1);
832 if (data == 0) { 850 if (data == 0) {
833 tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2); 851 tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2);
834 tuner_i2c_xfer_send(&i2c_props, soft_reset, 2); 852 tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
835 tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1); 853 tuner_i2c_xfer_send_recv(&i2c_props,
836 tuner_i2c_xfer_recv(&i2c_props, &data, 1); 854 &addr_dto_lsb, 1, &data, 1);
837 if (data == 0x7b) { 855 if (data == 0x7b) {
838 return 0; 856 return 0;
839 } 857 }
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 8ca48f76dfa9..98ffb40728e3 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -514,8 +514,8 @@ struct dib0700_rc_response {
514 union { 514 union {
515 u16 system16; 515 u16 system16;
516 struct { 516 struct {
517 u8 system;
518 u8 not_system; 517 u8 not_system;
518 u8 system;
519 }; 519 };
520 }; 520 };
521 u8 data; 521 u8 data;
@@ -575,7 +575,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
575 if ((poll_reply->system ^ poll_reply->not_system) != 0xff) { 575 if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
576 deb_data("NEC extended protocol\n"); 576 deb_data("NEC extended protocol\n");
577 /* NEC extended code - 24 bits */ 577 /* NEC extended code - 24 bits */
578 keycode = poll_reply->system16 << 8 | poll_reply->data; 578 keycode = be16_to_cpu(poll_reply->system16) << 8 | poll_reply->data;
579 } else { 579 } else {
580 deb_data("NEC normal protocol\n"); 580 deb_data("NEC normal protocol\n");
581 /* normal NEC code - 16 bits */ 581 /* normal NEC code - 16 bits */
@@ -587,7 +587,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
587 deb_data("RC5 protocol\n"); 587 deb_data("RC5 protocol\n");
588 /* RC5 Protocol */ 588 /* RC5 Protocol */
589 toggle = poll_reply->report_id; 589 toggle = poll_reply->report_id;
590 keycode = poll_reply->system16 << 8 | poll_reply->data; 590 keycode = poll_reply->system << 8 | poll_reply->data;
591 591
592 break; 592 break;
593 } 593 }
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
index fcf3828472b8..f82d4a93feb3 100644
--- a/drivers/media/dvb/firewire/firedtv-rc.c
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -172,7 +172,8 @@ void fdtv_unregister_rc(struct firedtv *fdtv)
172 172
173void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code) 173void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
174{ 174{
175 u16 *keycode = fdtv->remote_ctrl_dev->keycode; 175 struct input_dev *idev = fdtv->remote_ctrl_dev;
176 u16 *keycode = idev->keycode;
176 177
177 if (code >= 0x0300 && code <= 0x031f) 178 if (code >= 0x0300 && code <= 0x031f)
178 code = keycode[code - 0x0300]; 179 code = keycode[code - 0x0300];
@@ -188,6 +189,8 @@ void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
188 return; 189 return;
189 } 190 }
190 191
191 input_report_key(fdtv->remote_ctrl_dev, code, 1); 192 input_report_key(idev, code, 1);
192 input_report_key(fdtv->remote_ctrl_dev, code, 0); 193 input_sync(idev);
194 input_report_key(idev, code, 0);
195 input_sync(idev);
193} 196}
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index ef3e43a03199..b8519ba511e5 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -1,7 +1,7 @@
1config DVB_FE_CUSTOMISE 1config DVB_FE_CUSTOMISE
2 bool "Customise the frontend modules to build" 2 bool "Customise the frontend modules to build"
3 depends on DVB_CORE 3 depends on DVB_CORE
4 default y if EMBEDDED 4 default y if EXPERT
5 help 5 help
6 This allows the user to select/deselect frontend drivers for their 6 This allows the user to select/deselect frontend drivers for their
7 hardware from the build. 7 hardware from the build.
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index ce222055526d..ba25fa0b0fc2 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -334,11 +334,11 @@ static int af9013_set_freq_ctrl(struct af9013_state *state, fe_bandwidth_t bw)
334 if_sample_freq = 3300000; /* 3.3 MHz */ 334 if_sample_freq = 3300000; /* 3.3 MHz */
335 break; 335 break;
336 case BANDWIDTH_7_MHZ: 336 case BANDWIDTH_7_MHZ:
337 if_sample_freq = 3800000; /* 3.8 MHz */ 337 if_sample_freq = 3500000; /* 3.5 MHz */
338 break; 338 break;
339 case BANDWIDTH_8_MHZ: 339 case BANDWIDTH_8_MHZ:
340 default: 340 default:
341 if_sample_freq = 4300000; /* 4.3 MHz */ 341 if_sample_freq = 4000000; /* 4.0 MHz */
342 break; 342 break;
343 } 343 }
344 } else if (state->config.tuner == AF9013_TUNER_TDA18218) { 344 } else if (state->config.tuner == AF9013_TUNER_TDA18218) {
diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
index 6360c681ded9..6c2e929bd79f 100644
--- a/drivers/media/dvb/frontends/ix2505v.c
+++ b/drivers/media/dvb/frontends/ix2505v.c
@@ -311,7 +311,7 @@ struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
311 return fe; 311 return fe;
312 312
313error: 313error:
314 ix2505v_release(fe); 314 kfree(state);
315 return NULL; 315 return NULL;
316} 316}
317EXPORT_SYMBOL(ix2505v_attach); 317EXPORT_SYMBOL(ix2505v_attach);
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index d3ad3e75a35a..cc4acd2f920d 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -43,6 +43,8 @@ struct mb86a20s_state {
43 const struct mb86a20s_config *config; 43 const struct mb86a20s_config *config;
44 44
45 struct dvb_frontend frontend; 45 struct dvb_frontend frontend;
46
47 bool need_init;
46}; 48};
47 49
48struct regdata { 50struct regdata {
@@ -318,7 +320,7 @@ static int mb86a20s_i2c_writereg(struct mb86a20s_state *state,
318 320
319 rc = i2c_transfer(state->i2c, &msg, 1); 321 rc = i2c_transfer(state->i2c, &msg, 1);
320 if (rc != 1) { 322 if (rc != 1) {
321 printk("%s: writereg rcor(rc == %i, reg == 0x%02x," 323 printk("%s: writereg error (rc == %i, reg == 0x%02x,"
322 " data == 0x%02x)\n", __func__, rc, reg, data); 324 " data == 0x%02x)\n", __func__, rc, reg, data);
323 return rc; 325 return rc;
324 } 326 }
@@ -353,7 +355,7 @@ static int mb86a20s_i2c_readreg(struct mb86a20s_state *state,
353 rc = i2c_transfer(state->i2c, msg, 2); 355 rc = i2c_transfer(state->i2c, msg, 2);
354 356
355 if (rc != 2) { 357 if (rc != 2) {
356 rc("%s: reg=0x%x (rcor=%d)\n", __func__, reg, rc); 358 rc("%s: reg=0x%x (error=%d)\n", __func__, reg, rc);
357 return rc; 359 return rc;
358 } 360 }
359 361
@@ -382,23 +384,31 @@ static int mb86a20s_initfe(struct dvb_frontend *fe)
382 /* Initialize the frontend */ 384 /* Initialize the frontend */
383 rc = mb86a20s_writeregdata(state, mb86a20s_init); 385 rc = mb86a20s_writeregdata(state, mb86a20s_init);
384 if (rc < 0) 386 if (rc < 0)
385 return rc; 387 goto err;
386 388
387 if (!state->config->is_serial) { 389 if (!state->config->is_serial) {
388 regD5 &= ~1; 390 regD5 &= ~1;
389 391
390 rc = mb86a20s_writereg(state, 0x50, 0xd5); 392 rc = mb86a20s_writereg(state, 0x50, 0xd5);
391 if (rc < 0) 393 if (rc < 0)
392 return rc; 394 goto err;
393 rc = mb86a20s_writereg(state, 0x51, regD5); 395 rc = mb86a20s_writereg(state, 0x51, regD5);
394 if (rc < 0) 396 if (rc < 0)
395 return rc; 397 goto err;
396 } 398 }
397 399
398 if (fe->ops.i2c_gate_ctrl) 400 if (fe->ops.i2c_gate_ctrl)
399 fe->ops.i2c_gate_ctrl(fe, 1); 401 fe->ops.i2c_gate_ctrl(fe, 1);
400 402
401 return 0; 403err:
404 if (rc < 0) {
405 state->need_init = true;
406 printk(KERN_INFO "mb86a20s: Init failed. Will try again later\n");
407 } else {
408 state->need_init = false;
409 dprintk("Initialization succeded.\n");
410 }
411 return rc;
402} 412}
403 413
404static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength) 414static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
@@ -485,8 +495,22 @@ static int mb86a20s_set_frontend(struct dvb_frontend *fe,
485 495
486 if (fe->ops.i2c_gate_ctrl) 496 if (fe->ops.i2c_gate_ctrl)
487 fe->ops.i2c_gate_ctrl(fe, 1); 497 fe->ops.i2c_gate_ctrl(fe, 1);
498 dprintk("Calling tuner set parameters\n");
488 fe->ops.tuner_ops.set_params(fe, p); 499 fe->ops.tuner_ops.set_params(fe, p);
489 500
501 /*
502 * Make it more reliable: if, for some reason, the initial
503 * device initialization doesn't happen, initialize it when
504 * a SBTVD parameters are adjusted.
505 *
506 * Unfortunately, due to a hard to track bug at tda829x/tda18271,
507 * the agc callback logic is not called during DVB attach time,
508 * causing mb86a20s to not be initialized with Kworld SBTVD.
509 * So, this hack is needed, in order to make Kworld SBTVD to work.
510 */
511 if (state->need_init)
512 mb86a20s_initfe(fe);
513
490 if (fe->ops.i2c_gate_ctrl) 514 if (fe->ops.i2c_gate_ctrl)
491 fe->ops.i2c_gate_ctrl(fe, 0); 515 fe->ops.i2c_gate_ctrl(fe, 0);
492 rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception); 516 rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index 122c72806916..9fc1dd0ba4c3 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
277 { 277 {
278 ca_slot_info_t *info=(ca_slot_info_t *)parg; 278 ca_slot_info_t *info=(ca_slot_info_t *)parg;
279 279
280 if (info->num > 1) 280 if (info->num < 0 || info->num > 1)
281 return -EINVAL; 281 return -EINVAL;
282 av7110->ci_slot[info->num].num = info->num; 282 av7110->ci_slot[info->num].num = info->num;
283 av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? 283 av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 3c5a4739ed70..ecdffa6aac66 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -151,20 +151,6 @@ config RADIO_GEMTEK_PROBE
151 following ports will be probed: 0x20c, 0x30c, 0x24c, 0x34c, 0x248 and 151 following ports will be probed: 0x20c, 0x30c, 0x24c, 0x34c, 0x248 and
152 0x28c. 152 0x28c.
153 153
154config RADIO_GEMTEK_PCI
155 tristate "GemTek PCI Radio Card support"
156 depends on VIDEO_V4L2 && PCI
157 ---help---
158 Choose Y here if you have this PCI FM radio card.
159
160 In order to control your radio card, you will need to use programs
161 that are compatible with the Video for Linux API. Information on
162 this API and pointers to "v4l" programs may be found at
163 <file:Documentation/video4linux/API.html>.
164
165 To compile this driver as a module, choose M here: the
166 module will be called radio-gemtek-pci.
167
168config RADIO_MAXIRADIO 154config RADIO_MAXIRADIO
169 tristate "Guillemot MAXI Radio FM 2000 radio" 155 tristate "Guillemot MAXI Radio FM 2000 radio"
170 depends on VIDEO_V4L2 && PCI 156 depends on VIDEO_V4L2 && PCI
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index d2970748a69f..717656d2f749 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_RADIO_MAXIRADIO) += radio-maxiradio.o
13obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o 13obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
14obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o 14obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
15obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o 15obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
16obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o
17obj-$(CONFIG_RADIO_TRUST) += radio-trust.o 16obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
18obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o 17obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o
19obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o 18obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 6cc5d130fbc8..4ce10dbeadd8 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -31,6 +31,7 @@
31#include <linux/module.h> /* Modules */ 31#include <linux/module.h> /* Modules */
32#include <linux/init.h> /* Initdata */ 32#include <linux/init.h> /* Initdata */
33#include <linux/ioport.h> /* request_region */ 33#include <linux/ioport.h> /* request_region */
34#include <linux/delay.h> /* msleep */
34#include <linux/videodev2.h> /* kernel radio structs */ 35#include <linux/videodev2.h> /* kernel radio structs */
35#include <linux/version.h> /* for KERNEL_VERSION MACRO */ 36#include <linux/version.h> /* for KERNEL_VERSION MACRO */
36#include <linux/io.h> /* outb, outb_p */ 37#include <linux/io.h> /* outb, outb_p */
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
deleted file mode 100644
index 28fa85ba2087..000000000000
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ /dev/null
@@ -1,478 +0,0 @@
1/*
2 ***************************************************************************
3 *
4 * radio-gemtek-pci.c - Gemtek PCI Radio driver
5 * (C) 2001 Vladimir Shebordaev <vshebordaev@mail.ru>
6 *
7 ***************************************************************************
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public
20 * License along with this program; if not, write to the Free
21 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
22 * USA.
23 *
24 ***************************************************************************
25 *
26 * Gemtek Corp still silently refuses to release any specifications
27 * of their multimedia devices, so the protocol still has to be
28 * reverse engineered.
29 *
30 * The v4l code was inspired by Jonas Munsin's Gemtek serial line
31 * radio device driver.
32 *
33 * Please, let me know if this piece of code was useful :)
34 *
35 * TODO: multiple device support and portability were not tested
36 *
37 * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
38 *
39 ***************************************************************************
40 */
41
42#include <linux/types.h>
43#include <linux/list.h>
44#include <linux/module.h>
45#include <linux/init.h>
46#include <linux/pci.h>
47#include <linux/videodev2.h>
48#include <linux/errno.h>
49#include <linux/version.h> /* for KERNEL_VERSION MACRO */
50#include <linux/io.h>
51#include <linux/slab.h>
52#include <media/v4l2-device.h>
53#include <media/v4l2-ioctl.h>
54
55MODULE_AUTHOR("Vladimir Shebordaev <vshebordaev@mail.ru>");
56MODULE_DESCRIPTION("The video4linux driver for the Gemtek PCI Radio Card");
57MODULE_LICENSE("GPL");
58
59static int nr_radio = -1;
60static int mx = 1;
61
62module_param(mx, bool, 0);
63MODULE_PARM_DESC(mx, "single digit: 1 - turn off the turner upon module exit (default), 0 - do not");
64module_param(nr_radio, int, 0);
65MODULE_PARM_DESC(nr_radio, "video4linux device number to use");
66
67#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
68
69#ifndef PCI_VENDOR_ID_GEMTEK
70#define PCI_VENDOR_ID_GEMTEK 0x5046
71#endif
72
73#ifndef PCI_DEVICE_ID_GEMTEK_PR103
74#define PCI_DEVICE_ID_GEMTEK_PR103 0x1001
75#endif
76
77#ifndef GEMTEK_PCI_RANGE_LOW
78#define GEMTEK_PCI_RANGE_LOW (87*16000)
79#endif
80
81#ifndef GEMTEK_PCI_RANGE_HIGH
82#define GEMTEK_PCI_RANGE_HIGH (108*16000)
83#endif
84
85struct gemtek_pci {
86 struct v4l2_device v4l2_dev;
87 struct video_device vdev;
88 struct mutex lock;
89 struct pci_dev *pdev;
90
91 u32 iobase;
92 u32 length;
93
94 u32 current_frequency;
95 u8 mute;
96};
97
98static inline struct gemtek_pci *to_gemtek_pci(struct v4l2_device *v4l2_dev)
99{
100 return container_of(v4l2_dev, struct gemtek_pci, v4l2_dev);
101}
102
103static inline u8 gemtek_pci_out(u16 value, u32 port)
104{
105 outw(value, port);
106
107 return (u8)value;
108}
109
110#define _b0(v) (*((u8 *)&v))
111
112static void __gemtek_pci_cmd(u16 value, u32 port, u8 *last_byte, int keep)
113{
114 u8 byte = *last_byte;
115
116 if (!value) {
117 if (!keep)
118 value = (u16)port;
119 byte &= 0xfd;
120 } else
121 byte |= 2;
122
123 _b0(value) = byte;
124 outw(value, port);
125 byte |= 1;
126 _b0(value) = byte;
127 outw(value, port);
128 byte &= 0xfe;
129 _b0(value) = byte;
130 outw(value, port);
131
132 *last_byte = byte;
133}
134
135static inline void gemtek_pci_nil(u32 port, u8 *last_byte)
136{
137 __gemtek_pci_cmd(0x00, port, last_byte, false);
138}
139
140static inline void gemtek_pci_cmd(u16 cmd, u32 port, u8 *last_byte)
141{
142 __gemtek_pci_cmd(cmd, port, last_byte, true);
143}
144
145static void gemtek_pci_setfrequency(struct gemtek_pci *card, unsigned long frequency)
146{
147 int i;
148 u32 value = frequency / 200 + 856;
149 u16 mask = 0x8000;
150 u8 last_byte;
151 u32 port = card->iobase;
152
153 mutex_lock(&card->lock);
154 card->current_frequency = frequency;
155 last_byte = gemtek_pci_out(0x06, port);
156
157 i = 0;
158 do {
159 gemtek_pci_nil(port, &last_byte);
160 i++;
161 } while (i < 9);
162
163 i = 0;
164 do {
165 gemtek_pci_cmd(value & mask, port, &last_byte);
166 mask >>= 1;
167 i++;
168 } while (i < 16);
169
170 outw(0x10, port);
171 mutex_unlock(&card->lock);
172}
173
174
175static void gemtek_pci_mute(struct gemtek_pci *card)
176{
177 mutex_lock(&card->lock);
178 outb(0x1f, card->iobase);
179 card->mute = true;
180 mutex_unlock(&card->lock);
181}
182
183static void gemtek_pci_unmute(struct gemtek_pci *card)
184{
185 if (card->mute) {
186 gemtek_pci_setfrequency(card, card->current_frequency);
187 card->mute = false;
188 }
189}
190
191static int gemtek_pci_getsignal(struct gemtek_pci *card)
192{
193 int sig;
194
195 mutex_lock(&card->lock);
196 sig = (inb(card->iobase) & 0x08) ? 0 : 1;
197 mutex_unlock(&card->lock);
198 return sig;
199}
200
201static int vidioc_querycap(struct file *file, void *priv,
202 struct v4l2_capability *v)
203{
204 struct gemtek_pci *card = video_drvdata(file);
205
206 strlcpy(v->driver, "radio-gemtek-pci", sizeof(v->driver));
207 strlcpy(v->card, "GemTek PCI Radio", sizeof(v->card));
208 snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(card->pdev));
209 v->version = RADIO_VERSION;
210 v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
211 return 0;
212}
213
214static int vidioc_g_tuner(struct file *file, void *priv,
215 struct v4l2_tuner *v)
216{
217 struct gemtek_pci *card = video_drvdata(file);
218
219 if (v->index > 0)
220 return -EINVAL;
221
222 strlcpy(v->name, "FM", sizeof(v->name));
223 v->type = V4L2_TUNER_RADIO;
224 v->rangelow = GEMTEK_PCI_RANGE_LOW;
225 v->rangehigh = GEMTEK_PCI_RANGE_HIGH;
226 v->rxsubchans = V4L2_TUNER_SUB_MONO;
227 v->capability = V4L2_TUNER_CAP_LOW;
228 v->audmode = V4L2_TUNER_MODE_MONO;
229 v->signal = 0xffff * gemtek_pci_getsignal(card);
230 return 0;
231}
232
233static int vidioc_s_tuner(struct file *file, void *priv,
234 struct v4l2_tuner *v)
235{
236 return v->index ? -EINVAL : 0;
237}
238
239static int vidioc_s_frequency(struct file *file, void *priv,
240 struct v4l2_frequency *f)
241{
242 struct gemtek_pci *card = video_drvdata(file);
243
244 if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
245 return -EINVAL;
246 if (f->frequency < GEMTEK_PCI_RANGE_LOW ||
247 f->frequency > GEMTEK_PCI_RANGE_HIGH)
248 return -EINVAL;
249 gemtek_pci_setfrequency(card, f->frequency);
250 card->mute = false;
251 return 0;
252}
253
254static int vidioc_g_frequency(struct file *file, void *priv,
255 struct v4l2_frequency *f)
256{
257 struct gemtek_pci *card = video_drvdata(file);
258
259 if (f->tuner != 0)
260 return -EINVAL;
261 f->type = V4L2_TUNER_RADIO;
262 f->frequency = card->current_frequency;
263 return 0;
264}
265
266static int vidioc_queryctrl(struct file *file, void *priv,
267 struct v4l2_queryctrl *qc)
268{
269 switch (qc->id) {
270 case V4L2_CID_AUDIO_MUTE:
271 return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
272 case V4L2_CID_AUDIO_VOLUME:
273 return v4l2_ctrl_query_fill(qc, 0, 65535, 65535, 65535);
274 }
275 return -EINVAL;
276}
277
278static int vidioc_g_ctrl(struct file *file, void *priv,
279 struct v4l2_control *ctrl)
280{
281 struct gemtek_pci *card = video_drvdata(file);
282
283 switch (ctrl->id) {
284 case V4L2_CID_AUDIO_MUTE:
285 ctrl->value = card->mute;
286 return 0;
287 case V4L2_CID_AUDIO_VOLUME:
288 if (card->mute)
289 ctrl->value = 0;
290 else
291 ctrl->value = 65535;
292 return 0;
293 }
294 return -EINVAL;
295}
296
297static int vidioc_s_ctrl(struct file *file, void *priv,
298 struct v4l2_control *ctrl)
299{
300 struct gemtek_pci *card = video_drvdata(file);
301
302 switch (ctrl->id) {
303 case V4L2_CID_AUDIO_MUTE:
304 if (ctrl->value)
305 gemtek_pci_mute(card);
306 else
307 gemtek_pci_unmute(card);
308 return 0;
309 case V4L2_CID_AUDIO_VOLUME:
310 if (ctrl->value)
311 gemtek_pci_unmute(card);
312 else
313 gemtek_pci_mute(card);
314 return 0;
315 }
316 return -EINVAL;
317}
318
319static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
320{
321 *i = 0;
322 return 0;
323}
324
325static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
326{
327 return i ? -EINVAL : 0;
328}
329
330static int vidioc_g_audio(struct file *file, void *priv,
331 struct v4l2_audio *a)
332{
333 a->index = 0;
334 strlcpy(a->name, "Radio", sizeof(a->name));
335 a->capability = V4L2_AUDCAP_STEREO;
336 return 0;
337}
338
339static int vidioc_s_audio(struct file *file, void *priv,
340 struct v4l2_audio *a)
341{
342 return a->index ? -EINVAL : 0;
343}
344
345enum {
346 GEMTEK_PR103
347};
348
349static char *card_names[] __devinitdata = {
350 "GEMTEK_PR103"
351};
352
353static struct pci_device_id gemtek_pci_id[] =
354{
355 { PCI_VENDOR_ID_GEMTEK, PCI_DEVICE_ID_GEMTEK_PR103,
356 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GEMTEK_PR103 },
357 { 0 }
358};
359
360MODULE_DEVICE_TABLE(pci, gemtek_pci_id);
361
362static const struct v4l2_file_operations gemtek_pci_fops = {
363 .owner = THIS_MODULE,
364 .unlocked_ioctl = video_ioctl2,
365};
366
367static const struct v4l2_ioctl_ops gemtek_pci_ioctl_ops = {
368 .vidioc_querycap = vidioc_querycap,
369 .vidioc_g_tuner = vidioc_g_tuner,
370 .vidioc_s_tuner = vidioc_s_tuner,
371 .vidioc_g_audio = vidioc_g_audio,
372 .vidioc_s_audio = vidioc_s_audio,
373 .vidioc_g_input = vidioc_g_input,
374 .vidioc_s_input = vidioc_s_input,
375 .vidioc_g_frequency = vidioc_g_frequency,
376 .vidioc_s_frequency = vidioc_s_frequency,
377 .vidioc_queryctrl = vidioc_queryctrl,
378 .vidioc_g_ctrl = vidioc_g_ctrl,
379 .vidioc_s_ctrl = vidioc_s_ctrl,
380};
381
382static int __devinit gemtek_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
383{
384 struct gemtek_pci *card;
385 struct v4l2_device *v4l2_dev;
386 int res;
387
388 card = kzalloc(sizeof(struct gemtek_pci), GFP_KERNEL);
389 if (card == NULL) {
390 dev_err(&pdev->dev, "out of memory\n");
391 return -ENOMEM;
392 }
393
394 v4l2_dev = &card->v4l2_dev;
395 mutex_init(&card->lock);
396 card->pdev = pdev;
397
398 strlcpy(v4l2_dev->name, "gemtek_pci", sizeof(v4l2_dev->name));
399
400 res = v4l2_device_register(&pdev->dev, v4l2_dev);
401 if (res < 0) {
402 v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
403 kfree(card);
404 return res;
405 }
406
407 if (pci_enable_device(pdev))
408 goto err_pci;
409
410 card->iobase = pci_resource_start(pdev, 0);
411 card->length = pci_resource_len(pdev, 0);
412
413 if (request_region(card->iobase, card->length, card_names[pci_id->driver_data]) == NULL) {
414 v4l2_err(v4l2_dev, "i/o port already in use\n");
415 goto err_pci;
416 }
417
418 strlcpy(card->vdev.name, v4l2_dev->name, sizeof(card->vdev.name));
419 card->vdev.v4l2_dev = v4l2_dev;
420 card->vdev.fops = &gemtek_pci_fops;
421 card->vdev.ioctl_ops = &gemtek_pci_ioctl_ops;
422 card->vdev.release = video_device_release_empty;
423 video_set_drvdata(&card->vdev, card);
424
425 gemtek_pci_mute(card);
426
427 if (video_register_device(&card->vdev, VFL_TYPE_RADIO, nr_radio) < 0)
428 goto err_video;
429
430 v4l2_info(v4l2_dev, "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n",
431 pdev->revision, card->iobase, card->iobase + card->length - 1);
432
433 return 0;
434
435err_video:
436 release_region(card->iobase, card->length);
437
438err_pci:
439 v4l2_device_unregister(v4l2_dev);
440 kfree(card);
441 return -ENODEV;
442}
443
444static void __devexit gemtek_pci_remove(struct pci_dev *pdev)
445{
446 struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
447 struct gemtek_pci *card = to_gemtek_pci(v4l2_dev);
448
449 video_unregister_device(&card->vdev);
450 v4l2_device_unregister(v4l2_dev);
451
452 release_region(card->iobase, card->length);
453
454 if (mx)
455 gemtek_pci_mute(card);
456
457 kfree(card);
458}
459
460static struct pci_driver gemtek_pci_driver = {
461 .name = "gemtek_pci",
462 .id_table = gemtek_pci_id,
463 .probe = gemtek_pci_probe,
464 .remove = __devexit_p(gemtek_pci_remove),
465};
466
467static int __init gemtek_pci_init(void)
468{
469 return pci_register_driver(&gemtek_pci_driver);
470}
471
472static void __exit gemtek_pci_exit(void)
473{
474 pci_unregister_driver(&gemtek_pci_driver);
475}
476
477module_init(gemtek_pci_init);
478module_exit(gemtek_pci_exit);
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 6459a220b0dd..5c2a9058c09f 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -77,8 +77,8 @@ MODULE_PARM_DESC(debug, "activates debug info");
77/* TEA5757 pin mappings */ 77/* TEA5757 pin mappings */
78static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16; 78static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
79 79
80#define FREQ_LO (50 * 16000) 80#define FREQ_LO (87 * 16000)
81#define FREQ_HI (150 * 16000) 81#define FREQ_HI (108 * 16000)
82 82
83#define FREQ_IF 171200 /* 10.7*16000 */ 83#define FREQ_IF 171200 /* 10.7*16000 */
84#define FREQ_STEP 200 /* 12.5*16 */ 84#define FREQ_STEP 200 /* 12.5*16 */
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index dd6bd364efa0..7ecc8e657663 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1407,7 +1407,7 @@ static const struct v4l2_file_operations wl1273_fops = {
1407 .read = wl1273_fm_fops_read, 1407 .read = wl1273_fm_fops_read,
1408 .write = wl1273_fm_fops_write, 1408 .write = wl1273_fm_fops_write,
1409 .poll = wl1273_fm_fops_poll, 1409 .poll = wl1273_fm_fops_poll,
1410 .ioctl = video_ioctl2, 1410 .unlocked_ioctl = video_ioctl2,
1411 .open = wl1273_fm_fops_open, 1411 .open = wl1273_fm_fops_open,
1412 .release = wl1273_fm_fops_release, 1412 .release = wl1273_fm_fops_release,
1413}; 1413};
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index ac76dfe5b3fa..60c176fe328e 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -357,7 +357,8 @@ int si470x_start(struct si470x_device *radio)
357 goto done; 357 goto done;
358 358
359 /* sysconfig 1 */ 359 /* sysconfig 1 */
360 radio->registers[SYSCONFIG1] = SYSCONFIG1_DE; 360 radio->registers[SYSCONFIG1] =
361 (de << 11) & SYSCONFIG1_DE; /* DE*/
361 retval = si470x_set_register(radio, SYSCONFIG1); 362 retval = si470x_set_register(radio, SYSCONFIG1);
362 if (retval < 0) 363 if (retval < 0)
363 goto done; 364 goto done;
@@ -687,12 +688,8 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
687 /* driver constants */ 688 /* driver constants */
688 strcpy(tuner->name, "FM"); 689 strcpy(tuner->name, "FM");
689 tuner->type = V4L2_TUNER_RADIO; 690 tuner->type = V4L2_TUNER_RADIO;
690#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
691 tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | 691 tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
692 V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO; 692 V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
693#else
694 tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
695#endif
696 693
697 /* range limits */ 694 /* range limits */
698 switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) { 695 switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
@@ -718,12 +715,10 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
718 tuner->rxsubchans = V4L2_TUNER_SUB_MONO; 715 tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
719 else 716 else
720 tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; 717 tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
721#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
722 /* If there is a reliable method of detecting an RDS channel, 718 /* If there is a reliable method of detecting an RDS channel,
723 then this code should check for that before setting this 719 then this code should check for that before setting this
724 RDS subchannel. */ 720 RDS subchannel. */
725 tuner->rxsubchans |= V4L2_TUNER_SUB_RDS; 721 tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;
726#endif
727 722
728 /* mono/stereo selector */ 723 /* mono/stereo selector */
729 if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0) 724 if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0)
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 80b3c319f698..1ac49139158d 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -446,27 +446,27 @@ static void ene_rx_setup(struct ene_device *dev)
446 446
447select_timeout: 447select_timeout:
448 if (dev->rx_fan_input_inuse) { 448 if (dev->rx_fan_input_inuse) {
449 dev->rdev->rx_resolution = MS_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN); 449 dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
450 450
451 /* Fan input doesn't support timeouts, it just ends the 451 /* Fan input doesn't support timeouts, it just ends the
452 input with a maximum sample */ 452 input with a maximum sample */
453 dev->rdev->min_timeout = dev->rdev->max_timeout = 453 dev->rdev->min_timeout = dev->rdev->max_timeout =
454 MS_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK * 454 US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
455 ENE_FW_SAMPLE_PERIOD_FAN); 455 ENE_FW_SAMPLE_PERIOD_FAN);
456 } else { 456 } else {
457 dev->rdev->rx_resolution = MS_TO_NS(sample_period); 457 dev->rdev->rx_resolution = US_TO_NS(sample_period);
458 458
459 /* Theoreticly timeout is unlimited, but we cap it 459 /* Theoreticly timeout is unlimited, but we cap it
460 * because it was seen that on one device, it 460 * because it was seen that on one device, it
461 * would stop sending spaces after around 250 msec. 461 * would stop sending spaces after around 250 msec.
462 * Besides, this is close to 2^32 anyway and timeout is u32. 462 * Besides, this is close to 2^32 anyway and timeout is u32.
463 */ 463 */
464 dev->rdev->min_timeout = MS_TO_NS(127 * sample_period); 464 dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
465 dev->rdev->max_timeout = MS_TO_NS(200000); 465 dev->rdev->max_timeout = US_TO_NS(200000);
466 } 466 }
467 467
468 if (dev->hw_learning_and_tx_capable) 468 if (dev->hw_learning_and_tx_capable)
469 dev->rdev->tx_resolution = MS_TO_NS(sample_period); 469 dev->rdev->tx_resolution = US_TO_NS(sample_period);
470 470
471 if (dev->rdev->timeout > dev->rdev->max_timeout) 471 if (dev->rdev->timeout > dev->rdev->max_timeout)
472 dev->rdev->timeout = dev->rdev->max_timeout; 472 dev->rdev->timeout = dev->rdev->max_timeout;
@@ -801,7 +801,7 @@ static irqreturn_t ene_isr(int irq, void *data)
801 801
802 dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space"); 802 dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
803 803
804 ev.duration = MS_TO_NS(hw_sample); 804 ev.duration = US_TO_NS(hw_sample);
805 ev.pulse = pulse; 805 ev.pulse = pulse;
806 ir_raw_event_store_with_filter(dev->rdev, &ev); 806 ir_raw_event_store_with_filter(dev->rdev, &ev);
807 } 807 }
@@ -821,7 +821,7 @@ static void ene_setup_default_settings(struct ene_device *dev)
821 dev->learning_mode_enabled = learning_mode_force; 821 dev->learning_mode_enabled = learning_mode_force;
822 822
823 /* Set reasonable default timeout */ 823 /* Set reasonable default timeout */
824 dev->rdev->timeout = MS_TO_NS(150000); 824 dev->rdev->timeout = US_TO_NS(150000);
825} 825}
826 826
827/* Upload all hardware settings at once. Used at load and resume time */ 827/* Upload all hardware settings at once. Used at load and resume time */
@@ -1004,6 +1004,10 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1004 /* validate resources */ 1004 /* validate resources */
1005 error = -ENODEV; 1005 error = -ENODEV;
1006 1006
1007 /* init these to -1, as 0 is valid for both */
1008 dev->hw_io = -1;
1009 dev->irq = -1;
1010
1007 if (!pnp_port_valid(pnp_dev, 0) || 1011 if (!pnp_port_valid(pnp_dev, 0) ||
1008 pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE) 1012 pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
1009 goto error; 1013 goto error;
@@ -1072,6 +1076,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1072 rdev->input_name = "ENE eHome Infrared Remote Transceiver"; 1076 rdev->input_name = "ENE eHome Infrared Remote Transceiver";
1073 } 1077 }
1074 1078
1079 dev->rdev = rdev;
1080
1075 ene_rx_setup_hw_buffer(dev); 1081 ene_rx_setup_hw_buffer(dev);
1076 ene_setup_default_settings(dev); 1082 ene_setup_default_settings(dev);
1077 ene_setup_hw_settings(dev); 1083 ene_setup_hw_settings(dev);
@@ -1083,7 +1089,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1083 if (error < 0) 1089 if (error < 0)
1084 goto error; 1090 goto error;
1085 1091
1086 dev->rdev = rdev;
1087 ene_notice("driver has been succesfully loaded"); 1092 ene_notice("driver has been succesfully loaded");
1088 return 0; 1093 return 0;
1089error: 1094error:
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index c179baf34cb4..337a41d4450b 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -201,8 +201,6 @@
201#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__) 201#define dbg_verbose(format, ...) __dbg(2, format, ## __VA_ARGS__)
202#define dbg_regs(format, ...) __dbg(3, format, ## __VA_ARGS__) 202#define dbg_regs(format, ...) __dbg(3, format, ## __VA_ARGS__)
203 203
204#define MS_TO_NS(msec) ((msec) * 1000)
205
206struct ene_device { 204struct ene_device {
207 struct pnp_dev *pnp_dev; 205 struct pnp_dev *pnp_dev;
208 struct rc_dev *rdev; 206 struct rc_dev *rdev;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 6811512b4e83..e7dc6b46fdfa 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -988,7 +988,6 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
988 int retval; 988 int retval;
989 struct imon_context *ictx = rc->priv; 989 struct imon_context *ictx = rc->priv;
990 struct device *dev = ictx->dev; 990 struct device *dev = ictx->dev;
991 bool pad_mouse;
992 unsigned char ir_proto_packet[] = { 991 unsigned char ir_proto_packet[] = {
993 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; 992 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
994 993
@@ -1000,29 +999,20 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
1000 case RC_TYPE_RC6: 999 case RC_TYPE_RC6:
1001 dev_dbg(dev, "Configuring IR receiver for MCE protocol\n"); 1000 dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
1002 ir_proto_packet[0] = 0x01; 1001 ir_proto_packet[0] = 0x01;
1003 pad_mouse = false;
1004 break; 1002 break;
1005 case RC_TYPE_UNKNOWN: 1003 case RC_TYPE_UNKNOWN:
1006 case RC_TYPE_OTHER: 1004 case RC_TYPE_OTHER:
1007 dev_dbg(dev, "Configuring IR receiver for iMON protocol\n"); 1005 dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
1008 if (pad_stabilize && !nomouse) 1006 if (!pad_stabilize)
1009 pad_mouse = true;
1010 else {
1011 dev_dbg(dev, "PAD stabilize functionality disabled\n"); 1007 dev_dbg(dev, "PAD stabilize functionality disabled\n");
1012 pad_mouse = false;
1013 }
1014 /* ir_proto_packet[0] = 0x00; // already the default */ 1008 /* ir_proto_packet[0] = 0x00; // already the default */
1015 rc_type = RC_TYPE_OTHER; 1009 rc_type = RC_TYPE_OTHER;
1016 break; 1010 break;
1017 default: 1011 default:
1018 dev_warn(dev, "Unsupported IR protocol specified, overriding " 1012 dev_warn(dev, "Unsupported IR protocol specified, overriding "
1019 "to iMON IR protocol\n"); 1013 "to iMON IR protocol\n");
1020 if (pad_stabilize && !nomouse) 1014 if (!pad_stabilize)
1021 pad_mouse = true;
1022 else {
1023 dev_dbg(dev, "PAD stabilize functionality disabled\n"); 1015 dev_dbg(dev, "PAD stabilize functionality disabled\n");
1024 pad_mouse = false;
1025 }
1026 /* ir_proto_packet[0] = 0x00; // already the default */ 1016 /* ir_proto_packet[0] = 0x00; // already the default */
1027 rc_type = RC_TYPE_OTHER; 1017 rc_type = RC_TYPE_OTHER;
1028 break; 1018 break;
@@ -1035,7 +1025,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
1035 goto out; 1025 goto out;
1036 1026
1037 ictx->rc_type = rc_type; 1027 ictx->rc_type = rc_type;
1038 ictx->pad_mouse = pad_mouse; 1028 ictx->pad_mouse = false;
1039 1029
1040out: 1030out:
1041 return retval; 1031 return retval;
@@ -1517,7 +1507,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
1517 spin_unlock_irqrestore(&ictx->kc_lock, flags); 1507 spin_unlock_irqrestore(&ictx->kc_lock, flags);
1518 return; 1508 return;
1519 } else { 1509 } else {
1520 ictx->pad_mouse = 0; 1510 ictx->pad_mouse = false;
1521 dev_dbg(dev, "mouse mode disabled, passing key value\n"); 1511 dev_dbg(dev, "mouse mode disabled, passing key value\n");
1522 } 1512 }
1523 } 1513 }
@@ -1756,7 +1746,6 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
1756 printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte); 1746 printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
1757 1747
1758 ictx->display_type = detected_display_type; 1748 ictx->display_type = detected_display_type;
1759 ictx->rdev->allowed_protos = allowed_protos;
1760 ictx->rc_type = allowed_protos; 1749 ictx->rc_type = allowed_protos;
1761} 1750}
1762 1751
@@ -1839,10 +1828,6 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
1839 rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */ 1828 rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */
1840 rdev->change_protocol = imon_ir_change_protocol; 1829 rdev->change_protocol = imon_ir_change_protocol;
1841 rdev->driver_name = MOD_NAME; 1830 rdev->driver_name = MOD_NAME;
1842 if (ictx->rc_type == RC_TYPE_RC6)
1843 rdev->map_name = RC_MAP_IMON_MCE;
1844 else
1845 rdev->map_name = RC_MAP_IMON_PAD;
1846 1831
1847 /* Enable front-panel buttons and/or knobs */ 1832 /* Enable front-panel buttons and/or knobs */
1848 memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet)); 1833 memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
@@ -1851,11 +1836,18 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
1851 if (ret) 1836 if (ret)
1852 dev_info(ictx->dev, "panel buttons/knobs setup failed\n"); 1837 dev_info(ictx->dev, "panel buttons/knobs setup failed\n");
1853 1838
1854 if (ictx->product == 0xffdc) 1839 if (ictx->product == 0xffdc) {
1855 imon_get_ffdc_type(ictx); 1840 imon_get_ffdc_type(ictx);
1841 rdev->allowed_protos = ictx->rc_type;
1842 }
1856 1843
1857 imon_set_display_type(ictx); 1844 imon_set_display_type(ictx);
1858 1845
1846 if (ictx->rc_type == RC_TYPE_RC6)
1847 rdev->map_name = RC_MAP_IMON_MCE;
1848 else
1849 rdev->map_name = RC_MAP_IMON_PAD;
1850
1859 ret = rc_register_device(rdev); 1851 ret = rc_register_device(rdev);
1860 if (ret < 0) { 1852 if (ret < 0) {
1861 dev_err(ictx->dev, "remote input dev register failed\n"); 1853 dev_err(ictx->dev, "remote input dev register failed\n");
@@ -2108,18 +2100,6 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
2108 goto find_endpoint_failed; 2100 goto find_endpoint_failed;
2109 } 2101 }
2110 2102
2111 ictx->idev = imon_init_idev(ictx);
2112 if (!ictx->idev) {
2113 dev_err(dev, "%s: input device setup failed\n", __func__);
2114 goto idev_setup_failed;
2115 }
2116
2117 ictx->rdev = imon_init_rdev(ictx);
2118 if (!ictx->rdev) {
2119 dev_err(dev, "%s: rc device setup failed\n", __func__);
2120 goto rdev_setup_failed;
2121 }
2122
2123 usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0, 2103 usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
2124 usb_rcvintpipe(ictx->usbdev_intf0, 2104 usb_rcvintpipe(ictx->usbdev_intf0,
2125 ictx->rx_endpoint_intf0->bEndpointAddress), 2105 ictx->rx_endpoint_intf0->bEndpointAddress),
@@ -2133,13 +2113,25 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
2133 goto urb_submit_failed; 2113 goto urb_submit_failed;
2134 } 2114 }
2135 2115
2116 ictx->idev = imon_init_idev(ictx);
2117 if (!ictx->idev) {
2118 dev_err(dev, "%s: input device setup failed\n", __func__);
2119 goto idev_setup_failed;
2120 }
2121
2122 ictx->rdev = imon_init_rdev(ictx);
2123 if (!ictx->rdev) {
2124 dev_err(dev, "%s: rc device setup failed\n", __func__);
2125 goto rdev_setup_failed;
2126 }
2127
2136 return ictx; 2128 return ictx;
2137 2129
2138urb_submit_failed:
2139 rc_unregister_device(ictx->rdev);
2140rdev_setup_failed: 2130rdev_setup_failed:
2141 input_unregister_device(ictx->idev); 2131 input_unregister_device(ictx->idev);
2142idev_setup_failed: 2132idev_setup_failed:
2133 usb_kill_urb(ictx->rx_urb_intf0);
2134urb_submit_failed:
2143find_endpoint_failed: 2135find_endpoint_failed:
2144 mutex_unlock(&ictx->lock); 2136 mutex_unlock(&ictx->lock);
2145 usb_free_urb(tx_urb); 2137 usb_free_urb(tx_urb);
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index f011c5d9dea1..1c5cc65ea1e1 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -1,4 +1,4 @@
1/* ir-lirc-codec.c - ir-core to classic lirc interface bridge 1/* ir-lirc-codec.c - rc-core to classic lirc interface bridge
2 * 2 *
3 * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com> 3 * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
4 * 4 *
@@ -47,6 +47,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
47 /* Carrier reports */ 47 /* Carrier reports */
48 if (ev.carrier_report) { 48 if (ev.carrier_report) {
49 sample = LIRC_FREQUENCY(ev.carrier); 49 sample = LIRC_FREQUENCY(ev.carrier);
50 IR_dprintk(2, "carrier report (freq: %d)\n", sample);
50 51
51 /* Packet end */ 52 /* Packet end */
52 } else if (ev.timeout) { 53 } else if (ev.timeout) {
@@ -62,6 +63,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
62 return 0; 63 return 0;
63 64
64 sample = LIRC_TIMEOUT(ev.duration / 1000); 65 sample = LIRC_TIMEOUT(ev.duration / 1000);
66 IR_dprintk(2, "timeout report (duration: %d)\n", sample);
65 67
66 /* Normal sample */ 68 /* Normal sample */
67 } else { 69 } else {
@@ -85,6 +87,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
85 87
86 sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) : 88 sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
87 LIRC_SPACE(ev.duration / 1000); 89 LIRC_SPACE(ev.duration / 1000);
90 IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
91 TO_US(ev.duration), TO_STR(ev.pulse));
88 } 92 }
89 93
90 lirc_buffer_write(dev->raw->lirc.drv->rbuf, 94 lirc_buffer_write(dev->raw->lirc.drv->rbuf,
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 185baddcbf14..73230ff93b8a 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_handle);
233 233
234/* used internally by the sysfs interface */ 234/* used internally by the sysfs interface */
235u64 235u64
236ir_raw_get_allowed_protocols() 236ir_raw_get_allowed_protocols(void)
237{ 237{
238 u64 protocols; 238 u64 protocols;
239 mutex_lock(&ir_raw_handler_lock); 239 mutex_lock(&ir_raw_handler_lock);
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index c59851b203da..7a5f5300caf9 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -19,35 +19,35 @@
19 19
20static struct rc_map_table dib0700_nec_table[] = { 20static struct rc_map_table dib0700_nec_table[] = {
21 /* Key codes for the Pixelview SBTVD remote */ 21 /* Key codes for the Pixelview SBTVD remote */
22 { 0x8613, KEY_MUTE }, 22 { 0x866b13, KEY_MUTE },
23 { 0x8612, KEY_POWER }, 23 { 0x866b12, KEY_POWER },
24 { 0x8601, KEY_1 }, 24 { 0x866b01, KEY_1 },
25 { 0x8602, KEY_2 }, 25 { 0x866b02, KEY_2 },
26 { 0x8603, KEY_3 }, 26 { 0x866b03, KEY_3 },
27 { 0x8604, KEY_4 }, 27 { 0x866b04, KEY_4 },
28 { 0x8605, KEY_5 }, 28 { 0x866b05, KEY_5 },
29 { 0x8606, KEY_6 }, 29 { 0x866b06, KEY_6 },
30 { 0x8607, KEY_7 }, 30 { 0x866b07, KEY_7 },
31 { 0x8608, KEY_8 }, 31 { 0x866b08, KEY_8 },
32 { 0x8609, KEY_9 }, 32 { 0x866b09, KEY_9 },
33 { 0x8600, KEY_0 }, 33 { 0x866b00, KEY_0 },
34 { 0x860d, KEY_CHANNELUP }, 34 { 0x866b0d, KEY_CHANNELUP },
35 { 0x8619, KEY_CHANNELDOWN }, 35 { 0x866b19, KEY_CHANNELDOWN },
36 { 0x8610, KEY_VOLUMEUP }, 36 { 0x866b10, KEY_VOLUMEUP },
37 { 0x860c, KEY_VOLUMEDOWN }, 37 { 0x866b0c, KEY_VOLUMEDOWN },
38 38
39 { 0x860a, KEY_CAMERA }, 39 { 0x866b0a, KEY_CAMERA },
40 { 0x860b, KEY_ZOOM }, 40 { 0x866b0b, KEY_ZOOM },
41 { 0x861b, KEY_BACKSPACE }, 41 { 0x866b1b, KEY_BACKSPACE },
42 { 0x8615, KEY_ENTER }, 42 { 0x866b15, KEY_ENTER },
43 43
44 { 0x861d, KEY_UP }, 44 { 0x866b1d, KEY_UP },
45 { 0x861e, KEY_DOWN }, 45 { 0x866b1e, KEY_DOWN },
46 { 0x860e, KEY_LEFT }, 46 { 0x866b0e, KEY_LEFT },
47 { 0x860f, KEY_RIGHT }, 47 { 0x866b0f, KEY_RIGHT },
48 48
49 { 0x8618, KEY_RECORD }, 49 { 0x866b18, KEY_RECORD },
50 { 0x861a, KEY_STOP }, 50 { 0x866b1a, KEY_STOP },
51 51
52 /* Key codes for the EvolutePC TVWay+ remote */ 52 /* Key codes for the EvolutePC TVWay+ remote */
53 { 0x7a00, KEY_MENU }, 53 { 0x7a00, KEY_MENU },
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 3bf3337875d1..2f5dc0622b94 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -3,6 +3,9 @@
3 * 3 *
4 * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com> 4 * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
5 * 5 *
6 * See http://mediacenterguides.com/book/export/html/31 for details on
7 * key mappings.
8 *
6 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 11 * the Free Software Foundation; either version 2 of the License, or
@@ -60,6 +63,9 @@ static struct rc_map_table rc6_mce[] = {
60 { 0x800f0426, KEY_EPG }, /* Guide */ 63 { 0x800f0426, KEY_EPG }, /* Guide */
61 { 0x800f0427, KEY_ZOOM }, /* Aspect */ 64 { 0x800f0427, KEY_ZOOM }, /* Aspect */
62 65
66 { 0x800f0432, KEY_MODE }, /* Visualization */
67 { 0x800f0433, KEY_PRESENTATION }, /* Slide Show */
68 { 0x800f0434, KEY_EJECTCD },
63 { 0x800f043a, KEY_BRIGHTNESSUP }, 69 { 0x800f043a, KEY_BRIGHTNESSUP },
64 70
65 { 0x800f0446, KEY_TV }, 71 { 0x800f0446, KEY_TV },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0fef6efad537..6df0a4980645 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -48,7 +48,6 @@
48#define USB_BUFLEN 32 /* USB reception buffer length */ 48#define USB_BUFLEN 32 /* USB reception buffer length */
49#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */ 49#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
50#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */ 50#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
51#define MS_TO_NS(msec) ((msec) * 1000)
52 51
53/* MCE constants */ 52/* MCE constants */
54#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */ 53#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
@@ -817,7 +816,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
817 switch (ir->buf_in[index]) { 816 switch (ir->buf_in[index]) {
818 /* 2-byte return value commands */ 817 /* 2-byte return value commands */
819 case MCE_CMD_S_TIMEOUT: 818 case MCE_CMD_S_TIMEOUT:
820 ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2); 819 ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
821 break; 820 break;
822 821
823 /* 1-byte return value commands */ 822 /* 1-byte return value commands */
@@ -856,9 +855,10 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
856 break; 855 break;
857 case PARSE_IRDATA: 856 case PARSE_IRDATA:
858 ir->rem--; 857 ir->rem--;
858 init_ir_raw_event(&rawir);
859 rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0); 859 rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
860 rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK) 860 rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
861 * MS_TO_NS(MCE_TIME_UNIT); 861 * US_TO_NS(MCE_TIME_UNIT);
862 862
863 dev_dbg(ir->dev, "Storing %s with duration %d\n", 863 dev_dbg(ir->dev, "Storing %s with duration %d\n",
864 rawir.pulse ? "pulse" : "space", 864 rawir.pulse ? "pulse" : "space",
@@ -884,6 +884,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
884 i, ir->rem + 1, false); 884 i, ir->rem + 1, false);
885 if (ir->rem) 885 if (ir->rem)
886 ir->parser_state = PARSE_IRDATA; 886 ir->parser_state = PARSE_IRDATA;
887 else
888 ir_raw_event_reset(ir->rc);
887 break; 889 break;
888 } 890 }
889 891
@@ -1061,7 +1063,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
1061 rc->priv = ir; 1063 rc->priv = ir;
1062 rc->driver_type = RC_DRIVER_IR_RAW; 1064 rc->driver_type = RC_DRIVER_IR_RAW;
1063 rc->allowed_protos = RC_TYPE_ALL; 1065 rc->allowed_protos = RC_TYPE_ALL;
1064 rc->timeout = MS_TO_NS(1000); 1066 rc->timeout = US_TO_NS(1000);
1065 if (!ir->flags.no_tx) { 1067 if (!ir->flags.no_tx) {
1066 rc->s_tx_mask = mceusb_set_tx_mask; 1068 rc->s_tx_mask = mceusb_set_tx_mask;
1067 rc->s_tx_carrier = mceusb_set_tx_carrier; 1069 rc->s_tx_carrier = mceusb_set_tx_carrier;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index dd4caf8ef80b..273d9d674792 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -460,7 +460,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
460 return 0; 460 return 0;
461 } 461 }
462 462
463 carrier = (count * 1000000) / duration; 463 carrier = MS_TO_NS(count) / duration;
464 464
465 if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER)) 465 if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
466 nvt_dbg("WTF? Carrier frequency out of range!"); 466 nvt_dbg("WTF? Carrier frequency out of range!");
@@ -612,8 +612,8 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
612 sample = nvt->buf[i]; 612 sample = nvt->buf[i];
613 613
614 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); 614 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
615 rawir.duration = (sample & BUF_LEN_MASK) 615 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
616 * SAMPLE_PERIOD * 1000; 616 * SAMPLE_PERIOD);
617 617
618 if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { 618 if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
619 if (nvt->rawir.pulse == rawir.pulse) 619 if (nvt->rawir.pulse == rawir.pulse)
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 72be8a02118c..512a2f4ada0e 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -458,21 +458,27 @@ static int ir_getkeycode(struct input_dev *idev,
458 index = ir_lookup_by_scancode(rc_map, scancode); 458 index = ir_lookup_by_scancode(rc_map, scancode);
459 } 459 }
460 460
461 if (index >= rc_map->len) { 461 if (index < rc_map->len) {
462 if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) 462 entry = &rc_map->scan[index];
463 IR_dprintk(1, "unknown key for scancode 0x%04x\n", 463
464 scancode); 464 ke->index = index;
465 ke->keycode = entry->keycode;
466 ke->len = sizeof(entry->scancode);
467 memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
468
469 } else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) {
470 /*
471 * We do not really know the valid range of scancodes
472 * so let's respond with KEY_RESERVED to anything we
473 * do not have mapping for [yet].
474 */
475 ke->index = index;
476 ke->keycode = KEY_RESERVED;
477 } else {
465 retval = -EINVAL; 478 retval = -EINVAL;
466 goto out; 479 goto out;
467 } 480 }
468 481
469 entry = &rc_map->scan[index];
470
471 ke->index = index;
472 ke->keycode = entry->keycode;
473 ke->len = sizeof(entry->scancode);
474 memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
475
476 retval = 0; 482 retval = 0;
477 483
478out: 484out:
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 6e2911c2abfb..e435d94c0776 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -164,7 +164,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
164 sz->signal_start.tv_usec - 164 sz->signal_start.tv_usec -
165 sz->signal_last.tv_usec); 165 sz->signal_last.tv_usec);
166 rawir.duration -= sz->sum; 166 rawir.duration -= sz->sum;
167 rawir.duration *= 1000; 167 rawir.duration = US_TO_NS(rawir.duration);
168 rawir.duration &= IR_MAX_DURATION; 168 rawir.duration &= IR_MAX_DURATION;
169 } 169 }
170 sz_push(sz, rawir); 170 sz_push(sz, rawir);
@@ -177,7 +177,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
177 rawir.duration = ((int) value) * SZ_RESOLUTION; 177 rawir.duration = ((int) value) * SZ_RESOLUTION;
178 rawir.duration += SZ_RESOLUTION / 2; 178 rawir.duration += SZ_RESOLUTION / 2;
179 sz->sum += rawir.duration; 179 sz->sum += rawir.duration;
180 rawir.duration *= 1000; 180 rawir.duration = US_TO_NS(rawir.duration);
181 rawir.duration &= IR_MAX_DURATION; 181 rawir.duration &= IR_MAX_DURATION;
182 sz_push(sz, rawir); 182 sz_push(sz, rawir);
183} 183}
@@ -197,7 +197,7 @@ static void sz_push_full_space(struct streamzap_ir *sz,
197 rawir.duration = ((int) value) * SZ_RESOLUTION; 197 rawir.duration = ((int) value) * SZ_RESOLUTION;
198 rawir.duration += SZ_RESOLUTION / 2; 198 rawir.duration += SZ_RESOLUTION / 2;
199 sz->sum += rawir.duration; 199 sz->sum += rawir.duration;
200 rawir.duration *= 1000; 200 rawir.duration = US_TO_NS(rawir.duration);
201 sz_push(sz, rawir); 201 sz_push(sz, rawir);
202} 202}
203 203
@@ -273,6 +273,7 @@ static void streamzap_callback(struct urb *urb)
273 if (sz->timeout_enabled) 273 if (sz->timeout_enabled)
274 sz_push(sz, rawir); 274 sz_push(sz, rawir);
275 ir_raw_event_handle(sz->rdev); 275 ir_raw_event_handle(sz->rdev);
276 ir_raw_event_reset(sz->rdev);
276 } else { 277 } else {
277 sz_push_full_space(sz, sz->buf_in[i]); 278 sz_push_full_space(sz, sz->buf_in[i]);
278 } 279 }
@@ -290,6 +291,7 @@ static void streamzap_callback(struct urb *urb)
290 } 291 }
291 } 292 }
292 293
294 ir_raw_event_handle(sz->rdev);
293 usb_submit_urb(urb, GFP_ATOMIC); 295 usb_submit_urb(urb, GFP_ATOMIC);
294 296
295 return; 297 return;
@@ -430,13 +432,13 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
430 sz->decoder_state = PulseSpace; 432 sz->decoder_state = PulseSpace;
431 /* FIXME: don't yet have a way to set this */ 433 /* FIXME: don't yet have a way to set this */
432 sz->timeout_enabled = true; 434 sz->timeout_enabled = true;
433 sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) & 435 sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
434 IR_MAX_DURATION) | 0x03000000); 436 IR_MAX_DURATION) | 0x03000000);
435 #if 0 437 #if 0
436 /* not yet supported, depends on patches from maxim */ 438 /* not yet supported, depends on patches from maxim */
437 /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */ 439 /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
438 sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000; 440 sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
439 sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000; 441 sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
440 #endif 442 #endif
441 443
442 do_gettimeofday(&sz->signal_start); 444 do_gettimeofday(&sz->signal_start);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index eb875af05e79..aa021600e9df 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -78,7 +78,7 @@ config VIDEO_FIXED_MINOR_RANGES
78 78
79config VIDEO_HELPER_CHIPS_AUTO 79config VIDEO_HELPER_CHIPS_AUTO
80 bool "Autoselect pertinent encoders/decoders and other helper chips" 80 bool "Autoselect pertinent encoders/decoders and other helper chips"
81 default y if !EMBEDDED 81 default y if !EXPERT
82 ---help--- 82 ---help---
83 Most video cards may require additional modules to encode or 83 Most video cards may require additional modules to encode or
84 decode audio/video standards. This option will autoselect 84 decode audio/video standards. This option will autoselect
@@ -141,15 +141,6 @@ config VIDEO_TDA9840
141 To compile this driver as a module, choose M here: the 141 To compile this driver as a module, choose M here: the
142 module will be called tda9840. 142 module will be called tda9840.
143 143
144config VIDEO_TDA9875
145 tristate "Philips TDA9875 audio processor"
146 depends on VIDEO_V4L2 && I2C
147 ---help---
148 Support for tda9875 audio decoder chip found on some bt8xx boards.
149
150 To compile this driver as a module, choose M here: the
151 module will be called tda9875.
152
153config VIDEO_TEA6415C 144config VIDEO_TEA6415C
154 tristate "Philips TEA6415C audio processor" 145 tristate "Philips TEA6415C audio processor"
155 depends on I2C 146 depends on I2C
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 81e38cb0b846..a509d317e258 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_VIDEO_V4L2_COMMON) += v4l2-common.o
27obj-$(CONFIG_VIDEO_TUNER) += tuner.o 27obj-$(CONFIG_VIDEO_TUNER) += tuner.o
28obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o 28obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
29obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o 29obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
30obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
31obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o 30obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
32obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o 31obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
33obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o 32obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index f318b51448b3..d2327dbb473f 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -303,11 +303,22 @@ static int adv7175_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ide
303 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7175, 0); 303 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7175, 0);
304} 304}
305 305
306static int adv7175_s_power(struct v4l2_subdev *sd, int on)
307{
308 if (on)
309 adv7175_write(sd, 0x01, 0x00);
310 else
311 adv7175_write(sd, 0x01, 0x78);
312
313 return 0;
314}
315
306/* ----------------------------------------------------------------------- */ 316/* ----------------------------------------------------------------------- */
307 317
308static const struct v4l2_subdev_core_ops adv7175_core_ops = { 318static const struct v4l2_subdev_core_ops adv7175_core_ops = {
309 .g_chip_ident = adv7175_g_chip_ident, 319 .g_chip_ident = adv7175_g_chip_ident,
310 .init = adv7175_init, 320 .init = adv7175_init,
321 .s_power = adv7175_s_power,
311}; 322};
312 323
313static const struct v4l2_subdev_video_ops adv7175_video_ops = { 324static const struct v4l2_subdev_video_ops adv7175_video_ops = {
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 49efcf660ba6..7f58756d72c8 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -1373,7 +1373,6 @@ struct tvcard bttv_tvcards[] = {
1373 .gpiomute = 0x1800, 1373 .gpiomute = 0x1800,
1374 .audio_mode_gpio= fv2000s_audio, 1374 .audio_mode_gpio= fv2000s_audio,
1375 .no_msp34xx = 1, 1375 .no_msp34xx = 1,
1376 .no_tda9875 = 1,
1377 .needs_tvaudio = 1, 1376 .needs_tvaudio = 1,
1378 .pll = PLL_28, 1377 .pll = PLL_28,
1379 .tuner_type = TUNER_PHILIPS_PAL, 1378 .tuner_type = TUNER_PHILIPS_PAL,
@@ -1511,7 +1510,6 @@ struct tvcard bttv_tvcards[] = {
1511 .gpiomute = 0x09, 1510 .gpiomute = 0x09,
1512 .needs_tvaudio = 1, 1511 .needs_tvaudio = 1,
1513 .no_msp34xx = 1, 1512 .no_msp34xx = 1,
1514 .no_tda9875 = 1,
1515 .pll = PLL_28, 1513 .pll = PLL_28,
1516 .tuner_type = TUNER_PHILIPS_PAL, 1514 .tuner_type = TUNER_PHILIPS_PAL,
1517 .tuner_addr = ADDR_UNSET, 1515 .tuner_addr = ADDR_UNSET,
@@ -1550,7 +1548,6 @@ struct tvcard bttv_tvcards[] = {
1550 .gpiomask2 = 0x07ff, 1548 .gpiomask2 = 0x07ff,
1551 .muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), 1549 .muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
1552 .no_msp34xx = 1, 1550 .no_msp34xx = 1,
1553 .no_tda9875 = 1,
1554 .tuner_type = TUNER_ABSENT, 1551 .tuner_type = TUNER_ABSENT,
1555 .tuner_addr = ADDR_UNSET, 1552 .tuner_addr = ADDR_UNSET,
1556 .muxsel_hook = rv605_muxsel, 1553 .muxsel_hook = rv605_muxsel,
@@ -1686,7 +1683,6 @@ struct tvcard bttv_tvcards[] = {
1686 .tuner_type = TUNER_ABSENT, 1683 .tuner_type = TUNER_ABSENT,
1687 .tuner_addr = ADDR_UNSET, 1684 .tuner_addr = ADDR_UNSET,
1688 .no_msp34xx = 1, 1685 .no_msp34xx = 1,
1689 .no_tda9875 = 1,
1690 .no_tda7432 = 1, 1686 .no_tda7432 = 1,
1691 }, 1687 },
1692 [BTTV_BOARD_OSPREY1x0_848] = { 1688 [BTTV_BOARD_OSPREY1x0_848] = {
@@ -1699,7 +1695,6 @@ struct tvcard bttv_tvcards[] = {
1699 .tuner_type = TUNER_ABSENT, 1695 .tuner_type = TUNER_ABSENT,
1700 .tuner_addr = ADDR_UNSET, 1696 .tuner_addr = ADDR_UNSET,
1701 .no_msp34xx = 1, 1697 .no_msp34xx = 1,
1702 .no_tda9875 = 1,
1703 .no_tda7432 = 1, 1698 .no_tda7432 = 1,
1704 }, 1699 },
1705 1700
@@ -1714,7 +1709,6 @@ struct tvcard bttv_tvcards[] = {
1714 .tuner_type = TUNER_ABSENT, 1709 .tuner_type = TUNER_ABSENT,
1715 .tuner_addr = ADDR_UNSET, 1710 .tuner_addr = ADDR_UNSET,
1716 .no_msp34xx = 1, 1711 .no_msp34xx = 1,
1717 .no_tda9875 = 1,
1718 .no_tda7432 = 1, 1712 .no_tda7432 = 1,
1719 }, 1713 },
1720 [BTTV_BOARD_OSPREY1x1] = { 1714 [BTTV_BOARD_OSPREY1x1] = {
@@ -1727,7 +1721,6 @@ struct tvcard bttv_tvcards[] = {
1727 .tuner_type = TUNER_ABSENT, 1721 .tuner_type = TUNER_ABSENT,
1728 .tuner_addr = ADDR_UNSET, 1722 .tuner_addr = ADDR_UNSET,
1729 .no_msp34xx = 1, 1723 .no_msp34xx = 1,
1730 .no_tda9875 = 1,
1731 .no_tda7432 = 1, 1724 .no_tda7432 = 1,
1732 }, 1725 },
1733 [BTTV_BOARD_OSPREY1x1_SVID] = { 1726 [BTTV_BOARD_OSPREY1x1_SVID] = {
@@ -1740,7 +1733,6 @@ struct tvcard bttv_tvcards[] = {
1740 .tuner_type = TUNER_ABSENT, 1733 .tuner_type = TUNER_ABSENT,
1741 .tuner_addr = ADDR_UNSET, 1734 .tuner_addr = ADDR_UNSET,
1742 .no_msp34xx = 1, 1735 .no_msp34xx = 1,
1743 .no_tda9875 = 1,
1744 .no_tda7432 = 1, 1736 .no_tda7432 = 1,
1745 }, 1737 },
1746 [BTTV_BOARD_OSPREY2xx] = { 1738 [BTTV_BOARD_OSPREY2xx] = {
@@ -1753,7 +1745,6 @@ struct tvcard bttv_tvcards[] = {
1753 .tuner_type = TUNER_ABSENT, 1745 .tuner_type = TUNER_ABSENT,
1754 .tuner_addr = ADDR_UNSET, 1746 .tuner_addr = ADDR_UNSET,
1755 .no_msp34xx = 1, 1747 .no_msp34xx = 1,
1756 .no_tda9875 = 1,
1757 .no_tda7432 = 1, 1748 .no_tda7432 = 1,
1758 }, 1749 },
1759 1750
@@ -1768,7 +1759,6 @@ struct tvcard bttv_tvcards[] = {
1768 .tuner_type = TUNER_ABSENT, 1759 .tuner_type = TUNER_ABSENT,
1769 .tuner_addr = ADDR_UNSET, 1760 .tuner_addr = ADDR_UNSET,
1770 .no_msp34xx = 1, 1761 .no_msp34xx = 1,
1771 .no_tda9875 = 1,
1772 .no_tda7432 = 1, 1762 .no_tda7432 = 1,
1773 }, 1763 },
1774 [BTTV_BOARD_OSPREY2x0] = { 1764 [BTTV_BOARD_OSPREY2x0] = {
@@ -1781,7 +1771,6 @@ struct tvcard bttv_tvcards[] = {
1781 .tuner_type = TUNER_ABSENT, 1771 .tuner_type = TUNER_ABSENT,
1782 .tuner_addr = ADDR_UNSET, 1772 .tuner_addr = ADDR_UNSET,
1783 .no_msp34xx = 1, 1773 .no_msp34xx = 1,
1784 .no_tda9875 = 1,
1785 .no_tda7432 = 1, 1774 .no_tda7432 = 1,
1786 }, 1775 },
1787 [BTTV_BOARD_OSPREY500] = { 1776 [BTTV_BOARD_OSPREY500] = {
@@ -1794,7 +1783,6 @@ struct tvcard bttv_tvcards[] = {
1794 .tuner_type = TUNER_ABSENT, 1783 .tuner_type = TUNER_ABSENT,
1795 .tuner_addr = ADDR_UNSET, 1784 .tuner_addr = ADDR_UNSET,
1796 .no_msp34xx = 1, 1785 .no_msp34xx = 1,
1797 .no_tda9875 = 1,
1798 .no_tda7432 = 1, 1786 .no_tda7432 = 1,
1799 }, 1787 },
1800 [BTTV_BOARD_OSPREY540] = { 1788 [BTTV_BOARD_OSPREY540] = {
@@ -1805,7 +1793,6 @@ struct tvcard bttv_tvcards[] = {
1805 .tuner_type = TUNER_ABSENT, 1793 .tuner_type = TUNER_ABSENT,
1806 .tuner_addr = ADDR_UNSET, 1794 .tuner_addr = ADDR_UNSET,
1807 .no_msp34xx = 1, 1795 .no_msp34xx = 1,
1808 .no_tda9875 = 1,
1809 .no_tda7432 = 1, 1796 .no_tda7432 = 1,
1810 }, 1797 },
1811 1798
@@ -1820,7 +1807,6 @@ struct tvcard bttv_tvcards[] = {
1820 .tuner_type = TUNER_ABSENT, 1807 .tuner_type = TUNER_ABSENT,
1821 .tuner_addr = ADDR_UNSET, 1808 .tuner_addr = ADDR_UNSET,
1822 .no_msp34xx = 1, 1809 .no_msp34xx = 1,
1823 .no_tda9875 = 1,
1824 .no_tda7432 = 1, /* must avoid, conflicts with the bt860 */ 1810 .no_tda7432 = 1, /* must avoid, conflicts with the bt860 */
1825 }, 1811 },
1826 [BTTV_BOARD_IDS_EAGLE] = { 1812 [BTTV_BOARD_IDS_EAGLE] = {
@@ -1835,7 +1821,6 @@ struct tvcard bttv_tvcards[] = {
1835 .muxsel = MUXSEL(2, 2, 2, 2), 1821 .muxsel = MUXSEL(2, 2, 2, 2),
1836 .muxsel_hook = eagle_muxsel, 1822 .muxsel_hook = eagle_muxsel,
1837 .no_msp34xx = 1, 1823 .no_msp34xx = 1,
1838 .no_tda9875 = 1,
1839 .pll = PLL_28, 1824 .pll = PLL_28,
1840 }, 1825 },
1841 [BTTV_BOARD_PINNACLESAT] = { 1826 [BTTV_BOARD_PINNACLESAT] = {
@@ -1846,7 +1831,6 @@ struct tvcard bttv_tvcards[] = {
1846 .tuner_type = TUNER_ABSENT, 1831 .tuner_type = TUNER_ABSENT,
1847 .tuner_addr = ADDR_UNSET, 1832 .tuner_addr = ADDR_UNSET,
1848 .no_msp34xx = 1, 1833 .no_msp34xx = 1,
1849 .no_tda9875 = 1,
1850 .no_tda7432 = 1, 1834 .no_tda7432 = 1,
1851 .muxsel = MUXSEL(3, 1), 1835 .muxsel = MUXSEL(3, 1),
1852 .pll = PLL_28, 1836 .pll = PLL_28,
@@ -1897,7 +1881,6 @@ struct tvcard bttv_tvcards[] = {
1897 .svhs = 2, 1881 .svhs = 2,
1898 .gpiomask = 0, 1882 .gpiomask = 0,
1899 .no_msp34xx = 1, 1883 .no_msp34xx = 1,
1900 .no_tda9875 = 1,
1901 .no_tda7432 = 1, 1884 .no_tda7432 = 1,
1902 .muxsel = MUXSEL(2, 0, 1), 1885 .muxsel = MUXSEL(2, 0, 1),
1903 .pll = PLL_28, 1886 .pll = PLL_28,
@@ -1970,7 +1953,6 @@ struct tvcard bttv_tvcards[] = {
1970 /* Tuner, CVid, SVid, CVid over SVid connector */ 1953 /* Tuner, CVid, SVid, CVid over SVid connector */
1971 .muxsel = MUXSEL(2, 3, 1, 1), 1954 .muxsel = MUXSEL(2, 3, 1, 1),
1972 .gpiomask = 0, 1955 .gpiomask = 0,
1973 .no_tda9875 = 1,
1974 .no_tda7432 = 1, 1956 .no_tda7432 = 1,
1975 .tuner_type = TUNER_PHILIPS_PAL_I, 1957 .tuner_type = TUNER_PHILIPS_PAL_I,
1976 .tuner_addr = ADDR_UNSET, 1958 .tuner_addr = ADDR_UNSET,
@@ -2017,7 +1999,6 @@ struct tvcard bttv_tvcards[] = {
2017 .muxsel = MUXSEL(2,2,2,2, 3,3,3,3, 1,1,1,1, 0,0,0,0), 1999 .muxsel = MUXSEL(2,2,2,2, 3,3,3,3, 1,1,1,1, 0,0,0,0),
2018 .muxsel_hook = xguard_muxsel, 2000 .muxsel_hook = xguard_muxsel,
2019 .no_msp34xx = 1, 2001 .no_msp34xx = 1,
2020 .no_tda9875 = 1,
2021 .no_tda7432 = 1, 2002 .no_tda7432 = 1,
2022 .pll = PLL_28, 2003 .pll = PLL_28,
2023 }, 2004 },
@@ -2029,7 +2010,6 @@ struct tvcard bttv_tvcards[] = {
2029 .svhs = NO_SVHS, 2010 .svhs = NO_SVHS,
2030 .muxsel = MUXSEL(2, 3, 1, 0), 2011 .muxsel = MUXSEL(2, 3, 1, 0),
2031 .no_msp34xx = 1, 2012 .no_msp34xx = 1,
2032 .no_tda9875 = 1,
2033 .no_tda7432 = 1, 2013 .no_tda7432 = 1,
2034 .pll = PLL_28, 2014 .pll = PLL_28,
2035 .tuner_type = TUNER_ABSENT, 2015 .tuner_type = TUNER_ABSENT,
@@ -2134,7 +2114,6 @@ struct tvcard bttv_tvcards[] = {
2134 .svhs = NO_SVHS, /* card has no svhs */ 2114 .svhs = NO_SVHS, /* card has no svhs */
2135 .needs_tvaudio = 0, 2115 .needs_tvaudio = 0,
2136 .no_msp34xx = 1, 2116 .no_msp34xx = 1,
2137 .no_tda9875 = 1,
2138 .no_tda7432 = 1, 2117 .no_tda7432 = 1,
2139 .gpiomask = 0x00, 2118 .gpiomask = 0x00,
2140 .muxsel = MUXSEL(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), 2119 .muxsel = MUXSEL(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
@@ -2156,7 +2135,6 @@ struct tvcard bttv_tvcards[] = {
2156 [BTTV_BOARD_TWINHAN_DST] = { 2135 [BTTV_BOARD_TWINHAN_DST] = {
2157 .name = "Twinhan DST + clones", 2136 .name = "Twinhan DST + clones",
2158 .no_msp34xx = 1, 2137 .no_msp34xx = 1,
2159 .no_tda9875 = 1,
2160 .no_tda7432 = 1, 2138 .no_tda7432 = 1,
2161 .tuner_type = TUNER_ABSENT, 2139 .tuner_type = TUNER_ABSENT,
2162 .tuner_addr = ADDR_UNSET, 2140 .tuner_addr = ADDR_UNSET,
@@ -2171,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
2171 /* Vid In, SVid In, Vid over SVid in connector */ 2149 /* Vid In, SVid In, Vid over SVid in connector */
2172 .muxsel = MUXSEL(3, 1, 1, 3), 2150 .muxsel = MUXSEL(3, 1, 1, 3),
2173 .no_msp34xx = 1, 2151 .no_msp34xx = 1,
2174 .no_tda9875 = 1,
2175 .no_tda7432 = 1, 2152 .no_tda7432 = 1,
2176 .tuner_type = TUNER_ABSENT, 2153 .tuner_type = TUNER_ABSENT,
2177 .tuner_addr = ADDR_UNSET, 2154 .tuner_addr = ADDR_UNSET,
@@ -2226,7 +2203,6 @@ struct tvcard bttv_tvcards[] = {
2226 .svhs = NO_SVHS, 2203 .svhs = NO_SVHS,
2227 .muxsel = MUXSEL(2, 3, 1, 0), 2204 .muxsel = MUXSEL(2, 3, 1, 0),
2228 .no_msp34xx = 1, 2205 .no_msp34xx = 1,
2229 .no_tda9875 = 1,
2230 .no_tda7432 = 1, 2206 .no_tda7432 = 1,
2231 .needs_tvaudio = 0, 2207 .needs_tvaudio = 0,
2232 .tuner_type = TUNER_ABSENT, 2208 .tuner_type = TUNER_ABSENT,
@@ -2278,7 +2254,6 @@ struct tvcard bttv_tvcards[] = {
2278 .gpiomask = 0, 2254 .gpiomask = 0,
2279 .gpiomask2 = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/ 2255 .gpiomask2 = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/
2280 .no_msp34xx = 1, 2256 .no_msp34xx = 1,
2281 .no_tda9875 = 1,
2282 .no_tda7432 = 1, 2257 .no_tda7432 = 1,
2283 /*878A input is always MUX0, see above.*/ 2258 /*878A input is always MUX0, see above.*/
2284 .muxsel = MUXSEL(2, 2, 2, 2), 2259 .muxsel = MUXSEL(2, 2, 2, 2),
@@ -2302,7 +2277,6 @@ struct tvcard bttv_tvcards[] = {
2302 .tuner_type = TUNER_TEMIC_PAL, 2277 .tuner_type = TUNER_TEMIC_PAL,
2303 .tuner_addr = ADDR_UNSET, 2278 .tuner_addr = ADDR_UNSET,
2304 .no_msp34xx = 1, 2279 .no_msp34xx = 1,
2305 .no_tda9875 = 1,
2306 }, 2280 },
2307 [BTTV_BOARD_AVDVBT_771] = { 2281 [BTTV_BOARD_AVDVBT_771] = {
2308 /* Wolfram Joost <wojo@frokaschwei.de> */ 2282 /* Wolfram Joost <wojo@frokaschwei.de> */
@@ -2313,7 +2287,6 @@ struct tvcard bttv_tvcards[] = {
2313 .tuner_addr = ADDR_UNSET, 2287 .tuner_addr = ADDR_UNSET,
2314 .muxsel = MUXSEL(3, 3), 2288 .muxsel = MUXSEL(3, 3),
2315 .no_msp34xx = 1, 2289 .no_msp34xx = 1,
2316 .no_tda9875 = 1,
2317 .no_tda7432 = 1, 2290 .no_tda7432 = 1,
2318 .pll = PLL_28, 2291 .pll = PLL_28,
2319 .has_dvb = 1, 2292 .has_dvb = 1,
@@ -2329,7 +2302,6 @@ struct tvcard bttv_tvcards[] = {
2329 .svhs = 1, 2302 .svhs = 1,
2330 .muxsel = MUXSEL(3, 1, 2, 0), /* Comp0, S-Video, ?, ? */ 2303 .muxsel = MUXSEL(3, 1, 2, 0), /* Comp0, S-Video, ?, ? */
2331 .no_msp34xx = 1, 2304 .no_msp34xx = 1,
2332 .no_tda9875 = 1,
2333 .no_tda7432 = 1, 2305 .no_tda7432 = 1,
2334 .pll = PLL_28, 2306 .pll = PLL_28,
2335 .tuner_type = TUNER_ABSENT, 2307 .tuner_type = TUNER_ABSENT,
@@ -2393,7 +2365,6 @@ struct tvcard bttv_tvcards[] = {
2393 /* Chris Pascoe <c.pascoe@itee.uq.edu.au> */ 2365 /* Chris Pascoe <c.pascoe@itee.uq.edu.au> */
2394 .name = "DViCO FusionHDTV DVB-T Lite", 2366 .name = "DViCO FusionHDTV DVB-T Lite",
2395 .no_msp34xx = 1, 2367 .no_msp34xx = 1,
2396 .no_tda9875 = 1,
2397 .no_tda7432 = 1, 2368 .no_tda7432 = 1,
2398 .pll = PLL_28, 2369 .pll = PLL_28,
2399 .no_video = 1, 2370 .no_video = 1,
@@ -2440,7 +2411,6 @@ struct tvcard bttv_tvcards[] = {
2440 .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), 2411 .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
2441 .pll = PLL_28, 2412 .pll = PLL_28,
2442 .no_msp34xx = 1, 2413 .no_msp34xx = 1,
2443 .no_tda9875 = 1,
2444 .no_tda7432 = 1, 2414 .no_tda7432 = 1,
2445 .tuner_type = TUNER_ABSENT, 2415 .tuner_type = TUNER_ABSENT,
2446 .tuner_addr = ADDR_UNSET, 2416 .tuner_addr = ADDR_UNSET,
@@ -2478,7 +2448,6 @@ struct tvcard bttv_tvcards[] = {
2478 .pll = PLL_28, 2448 .pll = PLL_28,
2479 .no_msp34xx = 1, 2449 .no_msp34xx = 1,
2480 .no_tda7432 = 1, 2450 .no_tda7432 = 1,
2481 .no_tda9875 = 1,
2482 .muxsel_hook = kodicom4400r_muxsel, 2451 .muxsel_hook = kodicom4400r_muxsel,
2483 }, 2452 },
2484 [BTTV_BOARD_KODICOM_4400R_SL] = { 2453 [BTTV_BOARD_KODICOM_4400R_SL] = {
@@ -2500,7 +2469,6 @@ struct tvcard bttv_tvcards[] = {
2500 .pll = PLL_28, 2469 .pll = PLL_28,
2501 .no_msp34xx = 1, 2470 .no_msp34xx = 1,
2502 .no_tda7432 = 1, 2471 .no_tda7432 = 1,
2503 .no_tda9875 = 1,
2504 .muxsel_hook = kodicom4400r_muxsel, 2472 .muxsel_hook = kodicom4400r_muxsel,
2505 }, 2473 },
2506 /* ---- card 0x86---------------------------------- */ 2474 /* ---- card 0x86---------------------------------- */
@@ -2530,7 +2498,6 @@ struct tvcard bttv_tvcards[] = {
2530 .gpiomux = { 0x00400005, 0, 0x00000001, 0 }, 2498 .gpiomux = { 0x00400005, 0, 0x00000001, 0 },
2531 .gpiomute = 0x00c00007, 2499 .gpiomute = 0x00c00007,
2532 .no_msp34xx = 1, 2500 .no_msp34xx = 1,
2533 .no_tda9875 = 1,
2534 .no_tda7432 = 1, 2501 .no_tda7432 = 1,
2535 .has_dvb = 1, 2502 .has_dvb = 1,
2536 }, 2503 },
@@ -2630,7 +2597,6 @@ struct tvcard bttv_tvcards[] = {
2630 .tuner_type = TUNER_ABSENT, 2597 .tuner_type = TUNER_ABSENT,
2631 .tuner_addr = ADDR_UNSET, 2598 .tuner_addr = ADDR_UNSET,
2632 .no_msp34xx = 1, 2599 .no_msp34xx = 1,
2633 .no_tda9875 = 1,
2634 .no_tda7432 = 1, 2600 .no_tda7432 = 1,
2635 }, 2601 },
2636 /* ---- card 0x8d ---------------------------------- */ 2602 /* ---- card 0x8d ---------------------------------- */
@@ -2658,7 +2624,6 @@ struct tvcard bttv_tvcards[] = {
2658 .muxsel = MUXSEL(2, 3, 1, 1), 2624 .muxsel = MUXSEL(2, 3, 1, 1),
2659 .gpiomux = { 100000, 100002, 100002, 100000 }, 2625 .gpiomux = { 100000, 100002, 100002, 100000 },
2660 .no_msp34xx = 1, 2626 .no_msp34xx = 1,
2661 .no_tda9875 = 1,
2662 .no_tda7432 = 1, 2627 .no_tda7432 = 1,
2663 .pll = PLL_28, 2628 .pll = PLL_28,
2664 .tuner_type = TUNER_TNF_5335MF, 2629 .tuner_type = TUNER_TNF_5335MF,
@@ -2674,7 +2639,6 @@ struct tvcard bttv_tvcards[] = {
2674 .gpiomask = 0x0f, /* old: 7 */ 2639 .gpiomask = 0x0f, /* old: 7 */
2675 .muxsel = MUXSEL(0, 1, 3, 2), /* Composite 0-3 */ 2640 .muxsel = MUXSEL(0, 1, 3, 2), /* Composite 0-3 */
2676 .no_msp34xx = 1, 2641 .no_msp34xx = 1,
2677 .no_tda9875 = 1,
2678 .no_tda7432 = 1, 2642 .no_tda7432 = 1,
2679 .tuner_type = TUNER_ABSENT, 2643 .tuner_type = TUNER_ABSENT,
2680 .tuner_addr = ADDR_UNSET, 2644 .tuner_addr = ADDR_UNSET,
@@ -2732,7 +2696,6 @@ struct tvcard bttv_tvcards[] = {
2732 .gpiomux = { 0x00400005, 0, 0x00000001, 0 }, 2696 .gpiomux = { 0x00400005, 0, 0x00000001, 0 },
2733 .gpiomute = 0x00c00007, 2697 .gpiomute = 0x00c00007,
2734 .no_msp34xx = 1, 2698 .no_msp34xx = 1,
2735 .no_tda9875 = 1,
2736 .no_tda7432 = 1, 2699 .no_tda7432 = 1,
2737 }, 2700 },
2738 /* ---- card 0x95---------------------------------- */ 2701 /* ---- card 0x95---------------------------------- */
@@ -2874,7 +2837,6 @@ struct tvcard bttv_tvcards[] = {
2874 .pll = PLL_28, 2837 .pll = PLL_28,
2875 .no_msp34xx = 1, 2838 .no_msp34xx = 1,
2876 .no_tda7432 = 1, 2839 .no_tda7432 = 1,
2877 .no_tda9875 = 1,
2878 .muxsel_hook = gv800s_muxsel, 2840 .muxsel_hook = gv800s_muxsel,
2879 }, 2841 },
2880 [BTTV_BOARD_GEOVISION_GV800S_SL] = { 2842 [BTTV_BOARD_GEOVISION_GV800S_SL] = {
@@ -2899,7 +2861,6 @@ struct tvcard bttv_tvcards[] = {
2899 .pll = PLL_28, 2861 .pll = PLL_28,
2900 .no_msp34xx = 1, 2862 .no_msp34xx = 1,
2901 .no_tda7432 = 1, 2863 .no_tda7432 = 1,
2902 .no_tda9875 = 1,
2903 .muxsel_hook = gv800s_muxsel, 2864 .muxsel_hook = gv800s_muxsel,
2904 }, 2865 },
2905 [BTTV_BOARD_PV183] = { 2866 [BTTV_BOARD_PV183] = {
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index fd62bf15d779..c6333595c6b9 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -234,7 +234,6 @@ struct tvcard {
234 234
235 /* i2c audio flags */ 235 /* i2c audio flags */
236 unsigned int no_msp34xx:1; 236 unsigned int no_msp34xx:1;
237 unsigned int no_tda9875:1;
238 unsigned int no_tda7432:1; 237 unsigned int no_tda7432:1;
239 unsigned int needs_tvaudio:1; 238 unsigned int needs_tvaudio:1;
240 unsigned int msp34xx_alt:1; 239 unsigned int msp34xx_alt:1;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 789087cd6a9c..55ffd60ffa7f 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2001,6 +2001,11 @@ static int cafe_pci_probe(struct pci_dev *pdev,
2001 .min_width = 320, 2001 .min_width = 320,
2002 .min_height = 240, 2002 .min_height = 240,
2003 }; 2003 };
2004 struct i2c_board_info ov7670_info = {
2005 .type = "ov7670",
2006 .addr = 0x42,
2007 .platform_data = &sensor_cfg,
2008 };
2004 2009
2005 /* 2010 /*
2006 * Start putting together one of our big camera structures. 2011 * Start putting together one of our big camera structures.
@@ -2062,9 +2067,9 @@ static int cafe_pci_probe(struct pci_dev *pdev,
2062 if (dmi_check_system(olpc_xo1_dmi)) 2067 if (dmi_check_system(olpc_xo1_dmi))
2063 sensor_cfg.clock_speed = 45; 2068 sensor_cfg.clock_speed = 45;
2064 2069
2065 cam->sensor_addr = 0x42; 2070 cam->sensor_addr = ov7670_info.addr;
2066 cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter, 2071 cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
2067 "ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL); 2072 &ov7670_info, NULL);
2068 if (cam->sensor == NULL) { 2073 if (cam->sensor == NULL) {
2069 ret = -ENODEV; 2074 ret = -ENODEV;
2070 goto out_smbus; 2075 goto out_smbus;
@@ -2184,9 +2189,7 @@ static int cafe_pci_resume(struct pci_dev *pdev)
2184 struct cafe_camera *cam = to_cam(v4l2_dev); 2189 struct cafe_camera *cam = to_cam(v4l2_dev);
2185 int ret = 0; 2190 int ret = 0;
2186 2191
2187 ret = pci_restore_state(pdev); 2192 pci_restore_state(pdev);
2188 if (ret)
2189 return ret;
2190 ret = pci_enable_device(pdev); 2193 ret = pci_enable_device(pdev);
2191 2194
2192 if (ret) { 2195 if (ret) {
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 916c13d5cf7d..6d6d1843791c 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -378,7 +378,7 @@ struct cpia2_fh {
378 378
379struct camera_data { 379struct camera_data {
380 /* locks */ 380 /* locks */
381 struct mutex busy_lock; /* guard against SMP multithreading */ 381 struct mutex v4l2_lock; /* serialize file operations */
382 struct v4l2_prio_state prio; 382 struct v4l2_prio_state prio;
383 383
384 /* camera status */ 384 /* camera status */
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index 9606bc01b803..aaffca8e13fd 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -2247,7 +2247,7 @@ struct camera_data *cpia2_init_camera_struct(void)
2247 2247
2248 2248
2249 cam->present = 1; 2249 cam->present = 1;
2250 mutex_init(&cam->busy_lock); 2250 mutex_init(&cam->v4l2_lock);
2251 init_waitqueue_head(&cam->wq_stream); 2251 init_waitqueue_head(&cam->wq_stream);
2252 2252
2253 return cam; 2253 return cam;
@@ -2365,9 +2365,9 @@ long cpia2_read(struct camera_data *cam,
2365 char __user *buf, unsigned long count, int noblock) 2365 char __user *buf, unsigned long count, int noblock)
2366{ 2366{
2367 struct framebuf *frame; 2367 struct framebuf *frame;
2368 if (!count) { 2368
2369 if (!count)
2369 return 0; 2370 return 0;
2370 }
2371 2371
2372 if (!buf) { 2372 if (!buf) {
2373 ERR("%s: buffer NULL\n",__func__); 2373 ERR("%s: buffer NULL\n",__func__);
@@ -2379,17 +2379,12 @@ long cpia2_read(struct camera_data *cam,
2379 return -EINVAL; 2379 return -EINVAL;
2380 } 2380 }
2381 2381
2382 /* make this _really_ smp and multithread-safe */
2383 if (mutex_lock_interruptible(&cam->busy_lock))
2384 return -ERESTARTSYS;
2385
2386 if (!cam->present) { 2382 if (!cam->present) {
2387 LOG("%s: camera removed\n",__func__); 2383 LOG("%s: camera removed\n",__func__);
2388 mutex_unlock(&cam->busy_lock);
2389 return 0; /* EOF */ 2384 return 0; /* EOF */
2390 } 2385 }
2391 2386
2392 if(!cam->streaming) { 2387 if (!cam->streaming) {
2393 /* Start streaming */ 2388 /* Start streaming */
2394 cpia2_usb_stream_start(cam, 2389 cpia2_usb_stream_start(cam,
2395 cam->params.camera_state.stream_mode); 2390 cam->params.camera_state.stream_mode);
@@ -2398,42 +2393,31 @@ long cpia2_read(struct camera_data *cam,
2398 /* Copy cam->curbuff in case it changes while we're processing */ 2393 /* Copy cam->curbuff in case it changes while we're processing */
2399 frame = cam->curbuff; 2394 frame = cam->curbuff;
2400 if (noblock && frame->status != FRAME_READY) { 2395 if (noblock && frame->status != FRAME_READY) {
2401 mutex_unlock(&cam->busy_lock);
2402 return -EAGAIN; 2396 return -EAGAIN;
2403 } 2397 }
2404 2398
2405 if(frame->status != FRAME_READY) { 2399 if (frame->status != FRAME_READY) {
2406 mutex_unlock(&cam->busy_lock); 2400 mutex_unlock(&cam->v4l2_lock);
2407 wait_event_interruptible(cam->wq_stream, 2401 wait_event_interruptible(cam->wq_stream,
2408 !cam->present || 2402 !cam->present ||
2409 (frame = cam->curbuff)->status == FRAME_READY); 2403 (frame = cam->curbuff)->status == FRAME_READY);
2404 mutex_lock(&cam->v4l2_lock);
2410 if (signal_pending(current)) 2405 if (signal_pending(current))
2411 return -ERESTARTSYS; 2406 return -ERESTARTSYS;
2412 /* make this _really_ smp and multithread-safe */ 2407 if (!cam->present)
2413 if (mutex_lock_interruptible(&cam->busy_lock)) {
2414 return -ERESTARTSYS;
2415 }
2416 if(!cam->present) {
2417 mutex_unlock(&cam->busy_lock);
2418 return 0; 2408 return 0;
2419 }
2420 } 2409 }
2421 2410
2422 /* copy data to user space */ 2411 /* copy data to user space */
2423 if (frame->length > count) { 2412 if (frame->length > count)
2424 mutex_unlock(&cam->busy_lock);
2425 return -EFAULT; 2413 return -EFAULT;
2426 } 2414 if (copy_to_user(buf, frame->data, frame->length))
2427 if (copy_to_user(buf, frame->data, frame->length)) {
2428 mutex_unlock(&cam->busy_lock);
2429 return -EFAULT; 2415 return -EFAULT;
2430 }
2431 2416
2432 count = frame->length; 2417 count = frame->length;
2433 2418
2434 frame->status = FRAME_EMPTY; 2419 frame->status = FRAME_EMPTY;
2435 2420
2436 mutex_unlock(&cam->busy_lock);
2437 return count; 2421 return count;
2438} 2422}
2439 2423
@@ -2447,17 +2431,13 @@ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
2447{ 2431{
2448 unsigned int status=0; 2432 unsigned int status=0;
2449 2433
2450 if(!cam) { 2434 if (!cam) {
2451 ERR("%s: Internal error, camera_data not found!\n",__func__); 2435 ERR("%s: Internal error, camera_data not found!\n",__func__);
2452 return POLLERR; 2436 return POLLERR;
2453 } 2437 }
2454 2438
2455 mutex_lock(&cam->busy_lock); 2439 if (!cam->present)
2456
2457 if(!cam->present) {
2458 mutex_unlock(&cam->busy_lock);
2459 return POLLHUP; 2440 return POLLHUP;
2460 }
2461 2441
2462 if(!cam->streaming) { 2442 if(!cam->streaming) {
2463 /* Start streaming */ 2443 /* Start streaming */
@@ -2465,16 +2445,13 @@ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
2465 cam->params.camera_state.stream_mode); 2445 cam->params.camera_state.stream_mode);
2466 } 2446 }
2467 2447
2468 mutex_unlock(&cam->busy_lock);
2469 poll_wait(filp, &cam->wq_stream, wait); 2448 poll_wait(filp, &cam->wq_stream, wait);
2470 mutex_lock(&cam->busy_lock);
2471 2449
2472 if(!cam->present) 2450 if(!cam->present)
2473 status = POLLHUP; 2451 status = POLLHUP;
2474 else if(cam->curbuff->status == FRAME_READY) 2452 else if(cam->curbuff->status == FRAME_READY)
2475 status = POLLIN | POLLRDNORM; 2453 status = POLLIN | POLLRDNORM;
2476 2454
2477 mutex_unlock(&cam->busy_lock);
2478 return status; 2455 return status;
2479} 2456}
2480 2457
@@ -2496,29 +2473,19 @@ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
2496 2473
2497 DBG("mmap offset:%ld size:%ld\n", start_offset, size); 2474 DBG("mmap offset:%ld size:%ld\n", start_offset, size);
2498 2475
2499 /* make this _really_ smp-safe */ 2476 if (!cam->present)
2500 if (mutex_lock_interruptible(&cam->busy_lock))
2501 return -ERESTARTSYS;
2502
2503 if (!cam->present) {
2504 mutex_unlock(&cam->busy_lock);
2505 return -ENODEV; 2477 return -ENODEV;
2506 }
2507 2478
2508 if (size > cam->frame_size*cam->num_frames || 2479 if (size > cam->frame_size*cam->num_frames ||
2509 (start_offset % cam->frame_size) != 0 || 2480 (start_offset % cam->frame_size) != 0 ||
2510 (start_offset+size > cam->frame_size*cam->num_frames)) { 2481 (start_offset+size > cam->frame_size*cam->num_frames))
2511 mutex_unlock(&cam->busy_lock);
2512 return -EINVAL; 2482 return -EINVAL;
2513 }
2514 2483
2515 pos = ((unsigned long) (cam->frame_buffer)) + start_offset; 2484 pos = ((unsigned long) (cam->frame_buffer)) + start_offset;
2516 while (size > 0) { 2485 while (size > 0) {
2517 page = kvirt_to_pa(pos); 2486 page = kvirt_to_pa(pos);
2518 if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) { 2487 if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED))
2519 mutex_unlock(&cam->busy_lock);
2520 return -EAGAIN; 2488 return -EAGAIN;
2521 }
2522 start += PAGE_SIZE; 2489 start += PAGE_SIZE;
2523 pos += PAGE_SIZE; 2490 pos += PAGE_SIZE;
2524 if (size > PAGE_SIZE) 2491 if (size > PAGE_SIZE)
@@ -2528,7 +2495,5 @@ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
2528 } 2495 }
2529 2496
2530 cam->mmapped = true; 2497 cam->mmapped = true;
2531 mutex_unlock(&cam->busy_lock);
2532 return 0; 2498 return 0;
2533} 2499}
2534
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 7edf80b0d01a..9bad39842936 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -238,59 +238,40 @@ static struct v4l2_queryctrl controls[] = {
238static int cpia2_open(struct file *file) 238static int cpia2_open(struct file *file)
239{ 239{
240 struct camera_data *cam = video_drvdata(file); 240 struct camera_data *cam = video_drvdata(file);
241 int retval = 0; 241 struct cpia2_fh *fh;
242 242
243 if (!cam) { 243 if (!cam) {
244 ERR("Internal error, camera_data not found!\n"); 244 ERR("Internal error, camera_data not found!\n");
245 return -ENODEV; 245 return -ENODEV;
246 } 246 }
247 247
248 if(mutex_lock_interruptible(&cam->busy_lock)) 248 if (!cam->present)
249 return -ERESTARTSYS; 249 return -ENODEV;
250
251 if(!cam->present) {
252 retval = -ENODEV;
253 goto err_return;
254 }
255 250
256 if (cam->open_count > 0) { 251 if (cam->open_count == 0) {
257 goto skip_init; 252 if (cpia2_allocate_buffers(cam))
258 } 253 return -ENOMEM;
259 254
260 if (cpia2_allocate_buffers(cam)) { 255 /* reset the camera */
261 retval = -ENOMEM; 256 if (cpia2_reset_camera(cam) < 0)
262 goto err_return; 257 return -EIO;
263 }
264 258
265 /* reset the camera */ 259 cam->APP_len = 0;
266 if (cpia2_reset_camera(cam) < 0) { 260 cam->COM_len = 0;
267 retval = -EIO;
268 goto err_return;
269 } 261 }
270 262
271 cam->APP_len = 0; 263 fh = kmalloc(sizeof(*fh), GFP_KERNEL);
272 cam->COM_len = 0; 264 if (!fh)
273 265 return -ENOMEM;
274skip_init: 266 file->private_data = fh;
275 { 267 fh->prio = V4L2_PRIORITY_UNSET;
276 struct cpia2_fh *fh = kmalloc(sizeof(*fh),GFP_KERNEL); 268 v4l2_prio_open(&cam->prio, &fh->prio);
277 if(!fh) { 269 fh->mmapped = 0;
278 retval = -ENOMEM;
279 goto err_return;
280 }
281 file->private_data = fh;
282 fh->prio = V4L2_PRIORITY_UNSET;
283 v4l2_prio_open(&cam->prio, &fh->prio);
284 fh->mmapped = 0;
285 }
286 270
287 ++cam->open_count; 271 ++cam->open_count;
288 272
289 cpia2_dbg_dump_registers(cam); 273 cpia2_dbg_dump_registers(cam);
290 274 return 0;
291err_return:
292 mutex_unlock(&cam->busy_lock);
293 return retval;
294} 275}
295 276
296/****************************************************************************** 277/******************************************************************************
@@ -304,15 +285,11 @@ static int cpia2_close(struct file *file)
304 struct camera_data *cam = video_get_drvdata(dev); 285 struct camera_data *cam = video_get_drvdata(dev);
305 struct cpia2_fh *fh = file->private_data; 286 struct cpia2_fh *fh = file->private_data;
306 287
307 mutex_lock(&cam->busy_lock);
308
309 if (cam->present && 288 if (cam->present &&
310 (cam->open_count == 1 289 (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD)) {
311 || fh->prio == V4L2_PRIORITY_RECORD
312 )) {
313 cpia2_usb_stream_stop(cam); 290 cpia2_usb_stream_stop(cam);
314 291
315 if(cam->open_count == 1) { 292 if (cam->open_count == 1) {
316 /* save camera state for later open */ 293 /* save camera state for later open */
317 cpia2_save_camera_state(cam); 294 cpia2_save_camera_state(cam);
318 295
@@ -321,26 +298,21 @@ static int cpia2_close(struct file *file)
321 } 298 }
322 } 299 }
323 300
324 { 301 if (fh->mmapped)
325 if(fh->mmapped) 302 cam->mmapped = 0;
326 cam->mmapped = 0; 303 v4l2_prio_close(&cam->prio, fh->prio);
327 v4l2_prio_close(&cam->prio, fh->prio); 304 file->private_data = NULL;
328 file->private_data = NULL; 305 kfree(fh);
329 kfree(fh);
330 }
331 306
332 if (--cam->open_count == 0) { 307 if (--cam->open_count == 0) {
333 cpia2_free_buffers(cam); 308 cpia2_free_buffers(cam);
334 if (!cam->present) { 309 if (!cam->present) {
335 video_unregister_device(dev); 310 video_unregister_device(dev);
336 mutex_unlock(&cam->busy_lock);
337 kfree(cam); 311 kfree(cam);
338 return 0; 312 return 0;
339 } 313 }
340 } 314 }
341 315
342 mutex_unlock(&cam->busy_lock);
343
344 return 0; 316 return 0;
345} 317}
346 318
@@ -405,11 +377,11 @@ static int sync(struct camera_data *cam, int frame_nr)
405 return 0; 377 return 0;
406 } 378 }
407 379
408 mutex_unlock(&cam->busy_lock); 380 mutex_unlock(&cam->v4l2_lock);
409 wait_event_interruptible(cam->wq_stream, 381 wait_event_interruptible(cam->wq_stream,
410 !cam->streaming || 382 !cam->streaming ||
411 frame->status == FRAME_READY); 383 frame->status == FRAME_READY);
412 mutex_lock(&cam->busy_lock); 384 mutex_lock(&cam->v4l2_lock);
413 if (signal_pending(current)) 385 if (signal_pending(current))
414 return -ERESTARTSYS; 386 return -ERESTARTSYS;
415 if(!cam->present) 387 if(!cam->present)
@@ -1293,11 +1265,11 @@ static int ioctl_dqbuf(void *arg,struct camera_data *cam, struct file *file)
1293 if(frame < 0) { 1265 if(frame < 0) {
1294 /* Wait for a frame to become available */ 1266 /* Wait for a frame to become available */
1295 struct framebuf *cb=cam->curbuff; 1267 struct framebuf *cb=cam->curbuff;
1296 mutex_unlock(&cam->busy_lock); 1268 mutex_unlock(&cam->v4l2_lock);
1297 wait_event_interruptible(cam->wq_stream, 1269 wait_event_interruptible(cam->wq_stream,
1298 !cam->present || 1270 !cam->present ||
1299 (cb=cam->curbuff)->status == FRAME_READY); 1271 (cb=cam->curbuff)->status == FRAME_READY);
1300 mutex_lock(&cam->busy_lock); 1272 mutex_lock(&cam->v4l2_lock);
1301 if (signal_pending(current)) 1273 if (signal_pending(current))
1302 return -ERESTARTSYS; 1274 return -ERESTARTSYS;
1303 if(!cam->present) 1275 if(!cam->present)
@@ -1337,14 +1309,8 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1337 if (!cam) 1309 if (!cam)
1338 return -ENOTTY; 1310 return -ENOTTY;
1339 1311
1340 /* make this _really_ smp-safe */ 1312 if (!cam->present)
1341 if (mutex_lock_interruptible(&cam->busy_lock))
1342 return -ERESTARTSYS;
1343
1344 if (!cam->present) {
1345 mutex_unlock(&cam->busy_lock);
1346 return -ENODEV; 1313 return -ENODEV;
1347 }
1348 1314
1349 /* Priority check */ 1315 /* Priority check */
1350 switch (cmd) { 1316 switch (cmd) {
@@ -1352,10 +1318,8 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1352 { 1318 {
1353 struct cpia2_fh *fh = file->private_data; 1319 struct cpia2_fh *fh = file->private_data;
1354 retval = v4l2_prio_check(&cam->prio, fh->prio); 1320 retval = v4l2_prio_check(&cam->prio, fh->prio);
1355 if(retval) { 1321 if (retval)
1356 mutex_unlock(&cam->busy_lock);
1357 return retval; 1322 return retval;
1358 }
1359 break; 1323 break;
1360 } 1324 }
1361 default: 1325 default:
@@ -1529,7 +1493,6 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1529 break; 1493 break;
1530 } 1494 }
1531 1495
1532 mutex_unlock(&cam->busy_lock);
1533 return retval; 1496 return retval;
1534} 1497}
1535 1498
@@ -1596,7 +1559,7 @@ static const struct v4l2_file_operations cpia2_fops = {
1596 .release = cpia2_close, 1559 .release = cpia2_close,
1597 .read = cpia2_v4l_read, 1560 .read = cpia2_v4l_read,
1598 .poll = cpia2_v4l_poll, 1561 .poll = cpia2_v4l_poll,
1599 .ioctl = cpia2_ioctl, 1562 .unlocked_ioctl = cpia2_ioctl,
1600 .mmap = cpia2_mmap, 1563 .mmap = cpia2_mmap,
1601}; 1564};
1602 1565
@@ -1620,6 +1583,7 @@ int cpia2_register_camera(struct camera_data *cam)
1620 1583
1621 memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template)); 1584 memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template));
1622 video_set_drvdata(cam->vdev, cam); 1585 video_set_drvdata(cam->vdev, cam);
1586 cam->vdev->lock = &cam->v4l2_lock;
1623 1587
1624 reset_camera_struct_v4l(cam); 1588 reset_camera_struct_v4l(cam);
1625 1589
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 133ec2bac180..944af8adbe0c 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -664,7 +664,7 @@ static int __devinit cx18_create_in_workq(struct cx18 *cx)
664{ 664{
665 snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in", 665 snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
666 cx->v4l2_dev.name); 666 cx->v4l2_dev.name);
667 cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name); 667 cx->in_work_queue = alloc_ordered_workqueue(cx->in_workq_name, 0);
668 if (cx->in_work_queue == NULL) { 668 if (cx->in_work_queue == NULL) {
669 CX18_ERR("Unable to create incoming mailbox handler thread\n"); 669 CX18_ERR("Unable to create incoming mailbox handler thread\n");
670 return -ENOMEM; 670 return -ENOMEM;
@@ -672,18 +672,6 @@ static int __devinit cx18_create_in_workq(struct cx18 *cx)
672 return 0; 672 return 0;
673} 673}
674 674
675static int __devinit cx18_create_out_workq(struct cx18 *cx)
676{
677 snprintf(cx->out_workq_name, sizeof(cx->out_workq_name), "%s-out",
678 cx->v4l2_dev.name);
679 cx->out_work_queue = create_workqueue(cx->out_workq_name);
680 if (cx->out_work_queue == NULL) {
681 CX18_ERR("Unable to create outgoing mailbox handler threads\n");
682 return -ENOMEM;
683 }
684 return 0;
685}
686
687static void __devinit cx18_init_in_work_orders(struct cx18 *cx) 675static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
688{ 676{
689 int i; 677 int i;
@@ -710,15 +698,9 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
710 mutex_init(&cx->epu2apu_mb_lock); 698 mutex_init(&cx->epu2apu_mb_lock);
711 mutex_init(&cx->epu2cpu_mb_lock); 699 mutex_init(&cx->epu2cpu_mb_lock);
712 700
713 ret = cx18_create_out_workq(cx);
714 if (ret)
715 return ret;
716
717 ret = cx18_create_in_workq(cx); 701 ret = cx18_create_in_workq(cx);
718 if (ret) { 702 if (ret)
719 destroy_workqueue(cx->out_work_queue);
720 return ret; 703 return ret;
721 }
722 704
723 cx18_init_in_work_orders(cx); 705 cx18_init_in_work_orders(cx);
724 706
@@ -1107,7 +1089,6 @@ free_mem:
1107 release_mem_region(cx->base_addr, CX18_MEM_SIZE); 1089 release_mem_region(cx->base_addr, CX18_MEM_SIZE);
1108free_workqueues: 1090free_workqueues:
1109 destroy_workqueue(cx->in_work_queue); 1091 destroy_workqueue(cx->in_work_queue);
1110 destroy_workqueue(cx->out_work_queue);
1111err: 1092err:
1112 if (retval == 0) 1093 if (retval == 0)
1113 retval = -ENODEV; 1094 retval = -ENODEV;
@@ -1259,7 +1240,6 @@ static void cx18_remove(struct pci_dev *pci_dev)
1259 cx18_halt_firmware(cx); 1240 cx18_halt_firmware(cx);
1260 1241
1261 destroy_workqueue(cx->in_work_queue); 1242 destroy_workqueue(cx->in_work_queue);
1262 destroy_workqueue(cx->out_work_queue);
1263 1243
1264 cx18_streams_cleanup(cx, 1); 1244 cx18_streams_cleanup(cx, 1);
1265 1245
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index f6f3e50d4bdf..306caac6d3fc 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -617,9 +617,6 @@ struct cx18 {
617 struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS]; 617 struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
618 char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */ 618 char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
619 619
620 struct workqueue_struct *out_work_queue;
621 char out_workq_name[12]; /* "cx18-NN-out" */
622
623 /* i2c */ 620 /* i2c */
624 struct i2c_adapter i2c_adap[2]; 621 struct i2c_adapter i2c_adap[2];
625 struct i2c_algo_bit_data i2c_algo[2]; 622 struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 51765eb12d39..713b0e61536d 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -42,8 +42,7 @@ static inline bool cx18_stream_enabled(struct cx18_stream *s)
42/* Related to submission of mdls to firmware */ 42/* Related to submission of mdls to firmware */
43static inline void cx18_stream_load_fw_queue(struct cx18_stream *s) 43static inline void cx18_stream_load_fw_queue(struct cx18_stream *s)
44{ 44{
45 struct cx18 *cx = s->cx; 45 schedule_work(&s->out_work_order);
46 queue_work(cx->out_work_queue, &s->out_work_order);
47} 46}
48 47
49static inline void cx18_stream_put_mdl_fw(struct cx18_stream *s, 48static inline void cx18_stream_put_mdl_fw(struct cx18_stream *s,
diff --git a/drivers/media/video/cx231xx/cx231xx-dvb.c b/drivers/media/video/cx231xx/cx231xx-dvb.c
index fe59a1c3f064..363aa6004221 100644
--- a/drivers/media/video/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/video/cx231xx/cx231xx-dvb.c
@@ -28,7 +28,6 @@
28#include <media/videobuf-vmalloc.h> 28#include <media/videobuf-vmalloc.h>
29 29
30#include "xc5000.h" 30#include "xc5000.h"
31#include "dvb_dummy_fe.h"
32#include "s5h1432.h" 31#include "s5h1432.h"
33#include "tda18271.h" 32#include "tda18271.h"
34#include "s5h1411.h" 33#include "s5h1411.h"
@@ -619,7 +618,7 @@ static int dvb_init(struct cx231xx *dev)
619 618
620 if (dev->dvb->frontend == NULL) { 619 if (dev->dvb->frontend == NULL) {
621 printk(DRIVER_NAME 620 printk(DRIVER_NAME
622 ": Failed to attach dummy front end\n"); 621 ": Failed to attach s5h1411 front end\n");
623 result = -EINVAL; 622 result = -EINVAL;
624 goto out_free; 623 goto out_free;
625 } 624 }
@@ -665,7 +664,7 @@ static int dvb_init(struct cx231xx *dev)
665 664
666 if (dev->dvb->frontend == NULL) { 665 if (dev->dvb->frontend == NULL) {
667 printk(DRIVER_NAME 666 printk(DRIVER_NAME
668 ": Failed to attach dummy front end\n"); 667 ": Failed to attach s5h1411 front end\n");
669 result = -EINVAL; 668 result = -EINVAL;
670 goto out_free; 669 goto out_free;
671 } 670 }
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f16461844c5c..6fc09dd41b9d 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1682,20 +1682,6 @@ static int cx25840_log_status(struct v4l2_subdev *sd)
1682 return 0; 1682 return 0;
1683} 1683}
1684 1684
1685static int cx25840_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
1686{
1687 struct cx25840_state *state = to_state(sd);
1688 struct i2c_client *client = v4l2_get_subdevdata(sd);
1689
1690 if (platform_data) {
1691 struct cx25840_platform_data *pdata = platform_data;
1692
1693 state->pvr150_workaround = pdata->pvr150_workaround;
1694 set_input(client, state->vid_input, state->aud_input);
1695 }
1696 return 0;
1697}
1698
1699static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status, 1685static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status,
1700 bool *handled) 1686 bool *handled)
1701{ 1687{
@@ -1787,7 +1773,6 @@ static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
1787 1773
1788static const struct v4l2_subdev_core_ops cx25840_core_ops = { 1774static const struct v4l2_subdev_core_ops cx25840_core_ops = {
1789 .log_status = cx25840_log_status, 1775 .log_status = cx25840_log_status,
1790 .s_config = cx25840_s_config,
1791 .g_chip_ident = cx25840_g_chip_ident, 1776 .g_chip_ident = cx25840_g_chip_ident,
1792 .g_ctrl = v4l2_subdev_g_ctrl, 1777 .g_ctrl = v4l2_subdev_g_ctrl,
1793 .s_ctrl = v4l2_subdev_s_ctrl, 1778 .s_ctrl = v4l2_subdev_s_ctrl,
@@ -1974,7 +1959,6 @@ static int cx25840_probe(struct i2c_client *client,
1974 state->vid_input = CX25840_COMPOSITE7; 1959 state->vid_input = CX25840_COMPOSITE7;
1975 state->aud_input = CX25840_AUDIO8; 1960 state->aud_input = CX25840_AUDIO8;
1976 state->audclk_freq = 48000; 1961 state->audclk_freq = 48000;
1977 state->pvr150_workaround = 0;
1978 state->audmode = V4L2_TUNER_MODE_LANG1; 1962 state->audmode = V4L2_TUNER_MODE_LANG1;
1979 state->vbi_line_offset = 8; 1963 state->vbi_line_offset = 8;
1980 state->id = id; 1964 state->id = id;
@@ -2034,6 +2018,12 @@ static int cx25840_probe(struct i2c_client *client,
2034 v4l2_ctrl_cluster(2, &state->volume); 2018 v4l2_ctrl_cluster(2, &state->volume);
2035 v4l2_ctrl_handler_setup(&state->hdl); 2019 v4l2_ctrl_handler_setup(&state->hdl);
2036 2020
2021 if (client->dev.platform_data) {
2022 struct cx25840_platform_data *pdata = client->dev.platform_data;
2023
2024 state->pvr150_workaround = pdata->pvr150_workaround;
2025 }
2026
2037 cx25840_ir_probe(sd); 2027 cx25840_ir_probe(sd);
2038 return 0; 2028 return 0;
2039} 2029}
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index 1f532e31cd49..9f3bfc1eb240 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -41,6 +41,183 @@ spinlock_t vpif_lock;
41 41
42void __iomem *vpif_base; 42void __iomem *vpif_base;
43 43
44/**
45 * ch_params: video standard configuration parameters for vpif
46 * The table must include all presets from supported subdevices.
47 */
48const struct vpif_channel_config_params ch_params[] = {
49 /* HDTV formats */
50 {
51 .name = "480p59_94",
52 .width = 720,
53 .height = 480,
54 .frm_fmt = 1,
55 .ycmux_mode = 0,
56 .eav2sav = 138-8,
57 .sav2eav = 720,
58 .l1 = 1,
59 .l3 = 43,
60 .l5 = 523,
61 .vsize = 525,
62 .capture_format = 0,
63 .vbi_supported = 0,
64 .hd_sd = 1,
65 .dv_preset = V4L2_DV_480P59_94,
66 },
67 {
68 .name = "576p50",
69 .width = 720,
70 .height = 576,
71 .frm_fmt = 1,
72 .ycmux_mode = 0,
73 .eav2sav = 144-8,
74 .sav2eav = 720,
75 .l1 = 1,
76 .l3 = 45,
77 .l5 = 621,
78 .vsize = 625,
79 .capture_format = 0,
80 .vbi_supported = 0,
81 .hd_sd = 1,
82 .dv_preset = V4L2_DV_576P50,
83 },
84 {
85 .name = "720p50",
86 .width = 1280,
87 .height = 720,
88 .frm_fmt = 1,
89 .ycmux_mode = 0,
90 .eav2sav = 700-8,
91 .sav2eav = 1280,
92 .l1 = 1,
93 .l3 = 26,
94 .l5 = 746,
95 .vsize = 750,
96 .capture_format = 0,
97 .vbi_supported = 0,
98 .hd_sd = 1,
99 .dv_preset = V4L2_DV_720P50,
100 },
101 {
102 .name = "720p60",
103 .width = 1280,
104 .height = 720,
105 .frm_fmt = 1,
106 .ycmux_mode = 0,
107 .eav2sav = 370 - 8,
108 .sav2eav = 1280,
109 .l1 = 1,
110 .l3 = 26,
111 .l5 = 746,
112 .vsize = 750,
113 .capture_format = 0,
114 .vbi_supported = 0,
115 .hd_sd = 1,
116 .dv_preset = V4L2_DV_720P60,
117 },
118 {
119 .name = "1080I50",
120 .width = 1920,
121 .height = 1080,
122 .frm_fmt = 0,
123 .ycmux_mode = 0,
124 .eav2sav = 720 - 8,
125 .sav2eav = 1920,
126 .l1 = 1,
127 .l3 = 21,
128 .l5 = 561,
129 .l7 = 563,
130 .l9 = 584,
131 .l11 = 1124,
132 .vsize = 1125,
133 .capture_format = 0,
134 .vbi_supported = 0,
135 .hd_sd = 1,
136 .dv_preset = V4L2_DV_1080I50,
137 },
138 {
139 .name = "1080I60",
140 .width = 1920,
141 .height = 1080,
142 .frm_fmt = 0,
143 .ycmux_mode = 0,
144 .eav2sav = 280 - 8,
145 .sav2eav = 1920,
146 .l1 = 1,
147 .l3 = 21,
148 .l5 = 561,
149 .l7 = 563,
150 .l9 = 584,
151 .l11 = 1124,
152 .vsize = 1125,
153 .capture_format = 0,
154 .vbi_supported = 0,
155 .hd_sd = 1,
156 .dv_preset = V4L2_DV_1080I60,
157 },
158 {
159 .name = "1080p60",
160 .width = 1920,
161 .height = 1080,
162 .frm_fmt = 1,
163 .ycmux_mode = 0,
164 .eav2sav = 280 - 8,
165 .sav2eav = 1920,
166 .l1 = 1,
167 .l3 = 42,
168 .l5 = 1122,
169 .vsize = 1125,
170 .capture_format = 0,
171 .vbi_supported = 0,
172 .hd_sd = 1,
173 .dv_preset = V4L2_DV_1080P60,
174 },
175
176 /* SDTV formats */
177 {
178 .name = "NTSC_M",
179 .width = 720,
180 .height = 480,
181 .frm_fmt = 0,
182 .ycmux_mode = 1,
183 .eav2sav = 268,
184 .sav2eav = 1440,
185 .l1 = 1,
186 .l3 = 23,
187 .l5 = 263,
188 .l7 = 266,
189 .l9 = 286,
190 .l11 = 525,
191 .vsize = 525,
192 .capture_format = 0,
193 .vbi_supported = 1,
194 .hd_sd = 0,
195 .stdid = V4L2_STD_525_60,
196 },
197 {
198 .name = "PAL_BDGHIK",
199 .width = 720,
200 .height = 576,
201 .frm_fmt = 0,
202 .ycmux_mode = 1,
203 .eav2sav = 280,
204 .sav2eav = 1440,
205 .l1 = 1,
206 .l3 = 23,
207 .l5 = 311,
208 .l7 = 313,
209 .l9 = 336,
210 .l11 = 624,
211 .vsize = 625,
212 .capture_format = 0,
213 .vbi_supported = 1,
214 .hd_sd = 0,
215 .stdid = V4L2_STD_625_50,
216 },
217};
218
219const unsigned int vpif_ch_params_count = ARRAY_SIZE(ch_params);
220
44static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val) 221static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
45{ 222{
46 if (val) 223 if (val)
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index ebd5c4338ebb..10550bd93b06 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -577,12 +577,10 @@ struct vpif_channel_config_params {
577 char name[VPIF_MAX_NAME]; /* Name of the mode */ 577 char name[VPIF_MAX_NAME]; /* Name of the mode */
578 u16 width; /* Indicates width of the image */ 578 u16 width; /* Indicates width of the image */
579 u16 height; /* Indicates height of the image */ 579 u16 height; /* Indicates height of the image */
580 u8 fps; 580 u8 frm_fmt; /* Interlaced (0) or progressive (1) */
581 u8 frm_fmt; /* Indicates whether this is interlaced 581 u8 ycmux_mode; /* This mode requires one (0) or two (1)
582 * or progressive format */ 582 channels */
583 u8 ycmux_mode; /* Indicates whether this mode requires 583 u16 eav2sav; /* length of eav 2 sav */
584 * single or two channels */
585 u16 eav2sav; /* length of sav 2 eav */
586 u16 sav2eav; /* length of sav 2 eav */ 584 u16 sav2eav; /* length of sav 2 eav */
587 u16 l1, l3, l5, l7, l9, l11; /* Other parameter configurations */ 585 u16 l1, l3, l5, l7, l9, l11; /* Other parameter configurations */
588 u16 vsize; /* Vertical size of the image */ 586 u16 vsize; /* Vertical size of the image */
@@ -590,10 +588,14 @@ struct vpif_channel_config_params {
590 * is in BT or in CCD/CMOS */ 588 * is in BT or in CCD/CMOS */
591 u8 vbi_supported; /* Indicates whether this mode 589 u8 vbi_supported; /* Indicates whether this mode
592 * supports capturing vbi or not */ 590 * supports capturing vbi or not */
593 u8 hd_sd; 591 u8 hd_sd; /* HDTV (1) or SDTV (0) format */
594 v4l2_std_id stdid; 592 v4l2_std_id stdid; /* SDTV format */
593 u32 dv_preset; /* HDTV format */
595}; 594};
596 595
596extern const unsigned int vpif_ch_params_count;
597extern const struct vpif_channel_config_params ch_params[];
598
597struct vpif_video_params; 599struct vpif_video_params;
598struct vpif_params; 600struct vpif_params;
599struct vpif_vbi_params; 601struct vpif_vbi_params;
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 193abab6b355..d93ad74a34c5 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -37,6 +37,7 @@
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <media/v4l2-device.h> 38#include <media/v4l2-device.h>
39#include <media/v4l2-ioctl.h> 39#include <media/v4l2-ioctl.h>
40#include <media/v4l2-chip-ident.h>
40 41
41#include "vpif_capture.h" 42#include "vpif_capture.h"
42#include "vpif.h" 43#include "vpif.h"
@@ -81,20 +82,6 @@ static struct vpif_device vpif_obj = { {NULL} };
81static struct device *vpif_dev; 82static struct device *vpif_dev;
82 83
83/** 84/**
84 * ch_params: video standard configuration parameters for vpif
85 */
86static const struct vpif_channel_config_params ch_params[] = {
87 {
88 "NTSC_M", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
89 286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
90 },
91 {
92 "PAL_BDGHIK", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
93 336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
94 },
95};
96
97/**
98 * vpif_uservirt_to_phys : translate user/virtual address to phy address 85 * vpif_uservirt_to_phys : translate user/virtual address to phy address
99 * @virtp: user/virtual address 86 * @virtp: user/virtual address
100 * 87 *
@@ -342,7 +329,7 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
342 * @dev_id: dev_id ptr 329 * @dev_id: dev_id ptr
343 * 330 *
344 * It changes status of the captured buffer, takes next buffer from the queue 331 * It changes status of the captured buffer, takes next buffer from the queue
345 * and sets its address in VPIF registers 332 * and sets its address in VPIF registers
346 */ 333 */
347static irqreturn_t vpif_channel_isr(int irq, void *dev_id) 334static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
348{ 335{
@@ -435,24 +422,31 @@ static int vpif_update_std_info(struct channel_obj *ch)
435 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; 422 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
436 struct vpif_params *vpifparams = &ch->vpifparams; 423 struct vpif_params *vpifparams = &ch->vpifparams;
437 const struct vpif_channel_config_params *config; 424 const struct vpif_channel_config_params *config;
438 struct vpif_channel_config_params *std_info; 425 struct vpif_channel_config_params *std_info = &vpifparams->std_info;
439 struct video_obj *vid_ch = &ch->video; 426 struct video_obj *vid_ch = &ch->video;
440 int index; 427 int index;
441 428
442 vpif_dbg(2, debug, "vpif_update_std_info\n"); 429 vpif_dbg(2, debug, "vpif_update_std_info\n");
443 430
444 std_info = &vpifparams->std_info; 431 for (index = 0; index < vpif_ch_params_count; index++) {
445
446 for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
447 config = &ch_params[index]; 432 config = &ch_params[index];
448 if (config->stdid & vid_ch->stdid) { 433 if (config->hd_sd == 0) {
449 memcpy(std_info, config, sizeof(*config)); 434 vpif_dbg(2, debug, "SD format\n");
450 break; 435 if (config->stdid & vid_ch->stdid) {
436 memcpy(std_info, config, sizeof(*config));
437 break;
438 }
439 } else {
440 vpif_dbg(2, debug, "HD format\n");
441 if (config->dv_preset == vid_ch->dv_preset) {
442 memcpy(std_info, config, sizeof(*config));
443 break;
444 }
451 } 445 }
452 } 446 }
453 447
454 /* standard not found */ 448 /* standard not found */
455 if (index == ARRAY_SIZE(ch_params)) 449 if (index == vpif_ch_params_count)
456 return -EINVAL; 450 return -EINVAL;
457 451
458 common->fmt.fmt.pix.width = std_info->width; 452 common->fmt.fmt.pix.width = std_info->width;
@@ -462,6 +456,7 @@ static int vpif_update_std_info(struct channel_obj *ch)
462 common->fmt.fmt.pix.bytesperline = std_info->width; 456 common->fmt.fmt.pix.bytesperline = std_info->width;
463 vpifparams->video_params.hpitch = std_info->width; 457 vpifparams->video_params.hpitch = std_info->width;
464 vpifparams->video_params.storage_mode = std_info->frm_fmt; 458 vpifparams->video_params.storage_mode = std_info->frm_fmt;
459
465 return 0; 460 return 0;
466} 461}
467 462
@@ -757,7 +752,7 @@ static int vpif_open(struct file *filep)
757 struct video_obj *vid_ch; 752 struct video_obj *vid_ch;
758 struct channel_obj *ch; 753 struct channel_obj *ch;
759 struct vpif_fh *fh; 754 struct vpif_fh *fh;
760 int i, ret = 0; 755 int i;
761 756
762 vpif_dbg(2, debug, "vpif_open\n"); 757 vpif_dbg(2, debug, "vpif_open\n");
763 758
@@ -766,9 +761,6 @@ static int vpif_open(struct file *filep)
766 vid_ch = &ch->video; 761 vid_ch = &ch->video;
767 common = &ch->common[VPIF_VIDEO_INDEX]; 762 common = &ch->common[VPIF_VIDEO_INDEX];
768 763
769 if (mutex_lock_interruptible(&common->lock))
770 return -ERESTARTSYS;
771
772 if (NULL == ch->curr_subdev_info) { 764 if (NULL == ch->curr_subdev_info) {
773 /** 765 /**
774 * search through the sub device to see a registered 766 * search through the sub device to see a registered
@@ -785,8 +777,7 @@ static int vpif_open(struct file *filep)
785 } 777 }
786 if (i == config->subdev_count) { 778 if (i == config->subdev_count) {
787 vpif_err("No sub device registered\n"); 779 vpif_err("No sub device registered\n");
788 ret = -ENOENT; 780 return -ENOENT;
789 goto exit;
790 } 781 }
791 } 782 }
792 783
@@ -794,8 +785,7 @@ static int vpif_open(struct file *filep)
794 fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL); 785 fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
795 if (NULL == fh) { 786 if (NULL == fh) {
796 vpif_err("unable to allocate memory for file handle object\n"); 787 vpif_err("unable to allocate memory for file handle object\n");
797 ret = -ENOMEM; 788 return -ENOMEM;
798 goto exit;
799 } 789 }
800 790
801 /* store pointer to fh in private_data member of filep */ 791 /* store pointer to fh in private_data member of filep */
@@ -815,9 +805,7 @@ static int vpif_open(struct file *filep)
815 /* Initialize priority of this instance to default priority */ 805 /* Initialize priority of this instance to default priority */
816 fh->prio = V4L2_PRIORITY_UNSET; 806 fh->prio = V4L2_PRIORITY_UNSET;
817 v4l2_prio_open(&ch->prio, &fh->prio); 807 v4l2_prio_open(&ch->prio, &fh->prio);
818exit: 808 return 0;
819 mutex_unlock(&common->lock);
820 return ret;
821} 809}
822 810
823/** 811/**
@@ -837,9 +825,6 @@ static int vpif_release(struct file *filep)
837 825
838 common = &ch->common[VPIF_VIDEO_INDEX]; 826 common = &ch->common[VPIF_VIDEO_INDEX];
839 827
840 if (mutex_lock_interruptible(&common->lock))
841 return -ERESTARTSYS;
842
843 /* if this instance is doing IO */ 828 /* if this instance is doing IO */
844 if (fh->io_allowed[VPIF_VIDEO_INDEX]) { 829 if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
845 /* Reset io_usrs member of channel object */ 830 /* Reset io_usrs member of channel object */
@@ -863,9 +848,6 @@ static int vpif_release(struct file *filep)
863 /* Decrement channel usrs counter */ 848 /* Decrement channel usrs counter */
864 ch->usrs--; 849 ch->usrs--;
865 850
866 /* unlock mutex on channel object */
867 mutex_unlock(&common->lock);
868
869 /* Close the priority */ 851 /* Close the priority */
870 v4l2_prio_close(&ch->prio, fh->prio); 852 v4l2_prio_close(&ch->prio, fh->prio);
871 853
@@ -890,7 +872,6 @@ static int vpif_reqbufs(struct file *file, void *priv,
890 struct channel_obj *ch = fh->channel; 872 struct channel_obj *ch = fh->channel;
891 struct common_obj *common; 873 struct common_obj *common;
892 u8 index = 0; 874 u8 index = 0;
893 int ret = 0;
894 875
895 vpif_dbg(2, debug, "vpif_reqbufs\n"); 876 vpif_dbg(2, debug, "vpif_reqbufs\n");
896 877
@@ -913,13 +894,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
913 894
914 common = &ch->common[index]; 895 common = &ch->common[index];
915 896
916 if (mutex_lock_interruptible(&common->lock)) 897 if (0 != common->io_usrs)
917 return -ERESTARTSYS; 898 return -EBUSY;
918
919 if (0 != common->io_usrs) {
920 ret = -EBUSY;
921 goto reqbuf_exit;
922 }
923 899
924 /* Initialize videobuf queue as per the buffer type */ 900 /* Initialize videobuf queue as per the buffer type */
925 videobuf_queue_dma_contig_init(&common->buffer_queue, 901 videobuf_queue_dma_contig_init(&common->buffer_queue,
@@ -928,7 +904,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
928 reqbuf->type, 904 reqbuf->type,
929 common->fmt.fmt.pix.field, 905 common->fmt.fmt.pix.field,
930 sizeof(struct videobuf_buffer), fh, 906 sizeof(struct videobuf_buffer), fh,
931 NULL); 907 &common->lock);
932 908
933 /* Set io allowed member of file handle to TRUE */ 909 /* Set io allowed member of file handle to TRUE */
934 fh->io_allowed[index] = 1; 910 fh->io_allowed[index] = 1;
@@ -939,11 +915,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
939 INIT_LIST_HEAD(&common->dma_queue); 915 INIT_LIST_HEAD(&common->dma_queue);
940 916
941 /* Allocate buffers */ 917 /* Allocate buffers */
942 ret = videobuf_reqbufs(&common->buffer_queue, reqbuf); 918 return videobuf_reqbufs(&common->buffer_queue, reqbuf);
943
944reqbuf_exit:
945 mutex_unlock(&common->lock);
946 return ret;
947} 919}
948 920
949/** 921/**
@@ -1157,11 +1129,6 @@ static int vpif_streamon(struct file *file, void *priv,
1157 return ret; 1129 return ret;
1158 } 1130 }
1159 1131
1160 if (mutex_lock_interruptible(&common->lock)) {
1161 ret = -ERESTARTSYS;
1162 goto streamoff_exit;
1163 }
1164
1165 /* If buffer queue is empty, return error */ 1132 /* If buffer queue is empty, return error */
1166 if (list_empty(&common->dma_queue)) { 1133 if (list_empty(&common->dma_queue)) {
1167 vpif_dbg(1, debug, "buffer queue is empty\n"); 1134 vpif_dbg(1, debug, "buffer queue is empty\n");
@@ -1240,13 +1207,10 @@ static int vpif_streamon(struct file *file, void *priv,
1240 enable_channel1(1); 1207 enable_channel1(1);
1241 } 1208 }
1242 channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; 1209 channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
1243 mutex_unlock(&common->lock);
1244 return ret; 1210 return ret;
1245 1211
1246exit: 1212exit:
1247 mutex_unlock(&common->lock); 1213 videobuf_streamoff(&common->buffer_queue);
1248streamoff_exit:
1249 ret = videobuf_streamoff(&common->buffer_queue);
1250 return ret; 1214 return ret;
1251} 1215}
1252 1216
@@ -1284,9 +1248,6 @@ static int vpif_streamoff(struct file *file, void *priv,
1284 return -EINVAL; 1248 return -EINVAL;
1285 } 1249 }
1286 1250
1287 if (mutex_lock_interruptible(&common->lock))
1288 return -ERESTARTSYS;
1289
1290 /* disable channel */ 1251 /* disable channel */
1291 if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { 1252 if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
1292 enable_channel0(0); 1253 enable_channel0(0);
@@ -1304,8 +1265,6 @@ static int vpif_streamoff(struct file *file, void *priv,
1304 if (ret && (ret != -ENOIOCTLCMD)) 1265 if (ret && (ret != -ENOIOCTLCMD))
1305 vpif_dbg(1, debug, "stream off failed in subdev\n"); 1266 vpif_dbg(1, debug, "stream off failed in subdev\n");
1306 1267
1307 mutex_unlock(&common->lock);
1308
1309 return videobuf_streamoff(&common->buffer_queue); 1268 return videobuf_streamoff(&common->buffer_queue);
1310} 1269}
1311 1270
@@ -1381,21 +1340,16 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
1381{ 1340{
1382 struct vpif_fh *fh = priv; 1341 struct vpif_fh *fh = priv;
1383 struct channel_obj *ch = fh->channel; 1342 struct channel_obj *ch = fh->channel;
1384 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
1385 int ret = 0; 1343 int ret = 0;
1386 1344
1387 vpif_dbg(2, debug, "vpif_querystd\n"); 1345 vpif_dbg(2, debug, "vpif_querystd\n");
1388 1346
1389 if (mutex_lock_interruptible(&common->lock))
1390 return -ERESTARTSYS;
1391
1392 /* Call querystd function of decoder device */ 1347 /* Call querystd function of decoder device */
1393 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video, 1348 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
1394 querystd, std_id); 1349 querystd, std_id);
1395 if (ret < 0) 1350 if (ret < 0)
1396 vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); 1351 vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
1397 1352
1398 mutex_unlock(&common->lock);
1399 return ret; 1353 return ret;
1400} 1354}
1401 1355
@@ -1451,16 +1405,14 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
1451 fh->initialized = 1; 1405 fh->initialized = 1;
1452 1406
1453 /* Call encoder subdevice function to set the standard */ 1407 /* Call encoder subdevice function to set the standard */
1454 if (mutex_lock_interruptible(&common->lock))
1455 return -ERESTARTSYS;
1456
1457 ch->video.stdid = *std_id; 1408 ch->video.stdid = *std_id;
1409 ch->video.dv_preset = V4L2_DV_INVALID;
1410 memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
1458 1411
1459 /* Get the information about the standard */ 1412 /* Get the information about the standard */
1460 if (vpif_update_std_info(ch)) { 1413 if (vpif_update_std_info(ch)) {
1461 ret = -EINVAL;
1462 vpif_err("Error getting the standard info\n"); 1414 vpif_err("Error getting the standard info\n");
1463 goto s_std_exit; 1415 return -EINVAL;
1464 } 1416 }
1465 1417
1466 /* Configure the default format information */ 1418 /* Configure the default format information */
@@ -1471,9 +1423,6 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
1471 s_std, *std_id); 1423 s_std, *std_id);
1472 if (ret < 0) 1424 if (ret < 0)
1473 vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); 1425 vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
1474
1475s_std_exit:
1476 mutex_unlock(&common->lock);
1477 return ret; 1426 return ret;
1478} 1427}
1479 1428
@@ -1567,9 +1516,6 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
1567 return -EINVAL; 1516 return -EINVAL;
1568 } 1517 }
1569 1518
1570 if (mutex_lock_interruptible(&common->lock))
1571 return -ERESTARTSYS;
1572
1573 /* first setup input path from sub device to vpif */ 1519 /* first setup input path from sub device to vpif */
1574 if (config->setup_input_path) { 1520 if (config->setup_input_path) {
1575 ret = config->setup_input_path(ch->channel_id, 1521 ret = config->setup_input_path(ch->channel_id,
@@ -1578,7 +1524,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
1578 vpif_dbg(1, debug, "couldn't setup input path for the" 1524 vpif_dbg(1, debug, "couldn't setup input path for the"
1579 " sub device %s, for input index %d\n", 1525 " sub device %s, for input index %d\n",
1580 subdev_info->name, index); 1526 subdev_info->name, index);
1581 goto exit; 1527 return ret;
1582 } 1528 }
1583 } 1529 }
1584 1530
@@ -1589,7 +1535,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
1589 input, output, 0); 1535 input, output, 0);
1590 if (ret < 0) { 1536 if (ret < 0) {
1591 vpif_dbg(1, debug, "Failed to set input\n"); 1537 vpif_dbg(1, debug, "Failed to set input\n");
1592 goto exit; 1538 return ret;
1593 } 1539 }
1594 } 1540 }
1595 vid_ch->input_idx = index; 1541 vid_ch->input_idx = index;
@@ -1600,9 +1546,6 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
1600 1546
1601 /* update tvnorms from the sub device input info */ 1547 /* update tvnorms from the sub device input info */
1602 ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std; 1548 ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
1603
1604exit:
1605 mutex_unlock(&common->lock);
1606 return ret; 1549 return ret;
1607} 1550}
1608 1551
@@ -1671,11 +1614,7 @@ static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
1671 return -EINVAL; 1614 return -EINVAL;
1672 1615
1673 /* Fill in the information about format */ 1616 /* Fill in the information about format */
1674 if (mutex_lock_interruptible(&common->lock))
1675 return -ERESTARTSYS;
1676
1677 *fmt = common->fmt; 1617 *fmt = common->fmt;
1678 mutex_unlock(&common->lock);
1679 return 0; 1618 return 0;
1680} 1619}
1681 1620
@@ -1694,7 +1633,7 @@ static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
1694 struct v4l2_pix_format *pixfmt; 1633 struct v4l2_pix_format *pixfmt;
1695 int ret = 0; 1634 int ret = 0;
1696 1635
1697 vpif_dbg(2, debug, "VIDIOC_S_FMT\n"); 1636 vpif_dbg(2, debug, "%s\n", __func__);
1698 1637
1699 /* If streaming is started, return error */ 1638 /* If streaming is started, return error */
1700 if (common->started) { 1639 if (common->started) {
@@ -1723,12 +1662,7 @@ static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
1723 if (ret) 1662 if (ret)
1724 return ret; 1663 return ret;
1725 /* store the format in the channel object */ 1664 /* store the format in the channel object */
1726 if (mutex_lock_interruptible(&common->lock))
1727 return -ERESTARTSYS;
1728
1729 common->fmt = *fmt; 1665 common->fmt = *fmt;
1730 mutex_unlock(&common->lock);
1731
1732 return 0; 1666 return 0;
1733} 1667}
1734 1668
@@ -1807,6 +1741,306 @@ static int vpif_cropcap(struct file *file, void *priv,
1807 return 0; 1741 return 0;
1808} 1742}
1809 1743
1744/**
1745 * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
1746 * @file: file ptr
1747 * @priv: file handle
1748 * @preset: input preset
1749 */
1750static int vpif_enum_dv_presets(struct file *file, void *priv,
1751 struct v4l2_dv_enum_preset *preset)
1752{
1753 struct vpif_fh *fh = priv;
1754 struct channel_obj *ch = fh->channel;
1755
1756 return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
1757 video, enum_dv_presets, preset);
1758}
1759
1760/**
1761 * vpif_query_dv_presets() - QUERY_DV_PRESET handler
1762 * @file: file ptr
1763 * @priv: file handle
1764 * @preset: input preset
1765 */
1766static int vpif_query_dv_preset(struct file *file, void *priv,
1767 struct v4l2_dv_preset *preset)
1768{
1769 struct vpif_fh *fh = priv;
1770 struct channel_obj *ch = fh->channel;
1771
1772 return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
1773 video, query_dv_preset, preset);
1774}
1775/**
1776 * vpif_s_dv_presets() - S_DV_PRESETS handler
1777 * @file: file ptr
1778 * @priv: file handle
1779 * @preset: input preset
1780 */
1781static int vpif_s_dv_preset(struct file *file, void *priv,
1782 struct v4l2_dv_preset *preset)
1783{
1784 struct vpif_fh *fh = priv;
1785 struct channel_obj *ch = fh->channel;
1786 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
1787 int ret = 0;
1788
1789 if (common->started) {
1790 vpif_dbg(1, debug, "streaming in progress\n");
1791 return -EBUSY;
1792 }
1793
1794 if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
1795 (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
1796 if (!fh->initialized) {
1797 vpif_dbg(1, debug, "Channel Busy\n");
1798 return -EBUSY;
1799 }
1800 }
1801
1802 ret = v4l2_prio_check(&ch->prio, fh->prio);
1803 if (ret)
1804 return ret;
1805
1806 fh->initialized = 1;
1807
1808 /* Call encoder subdevice function to set the standard */
1809 if (mutex_lock_interruptible(&common->lock))
1810 return -ERESTARTSYS;
1811
1812 ch->video.dv_preset = preset->preset;
1813 ch->video.stdid = V4L2_STD_UNKNOWN;
1814 memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
1815
1816 /* Get the information about the standard */
1817 if (vpif_update_std_info(ch)) {
1818 vpif_dbg(1, debug, "Error getting the standard info\n");
1819 ret = -EINVAL;
1820 } else {
1821 /* Configure the default format information */
1822 vpif_config_format(ch);
1823
1824 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
1825 video, s_dv_preset, preset);
1826 }
1827
1828 mutex_unlock(&common->lock);
1829
1830 return ret;
1831}
1832/**
1833 * vpif_g_dv_presets() - G_DV_PRESETS handler
1834 * @file: file ptr
1835 * @priv: file handle
1836 * @preset: input preset
1837 */
1838static int vpif_g_dv_preset(struct file *file, void *priv,
1839 struct v4l2_dv_preset *preset)
1840{
1841 struct vpif_fh *fh = priv;
1842 struct channel_obj *ch = fh->channel;
1843
1844 preset->preset = ch->video.dv_preset;
1845
1846 return 0;
1847}
1848
1849/**
1850 * vpif_s_dv_timings() - S_DV_TIMINGS handler
1851 * @file: file ptr
1852 * @priv: file handle
1853 * @timings: digital video timings
1854 */
1855static int vpif_s_dv_timings(struct file *file, void *priv,
1856 struct v4l2_dv_timings *timings)
1857{
1858 struct vpif_fh *fh = priv;
1859 struct channel_obj *ch = fh->channel;
1860 struct vpif_params *vpifparams = &ch->vpifparams;
1861 struct vpif_channel_config_params *std_info = &vpifparams->std_info;
1862 struct video_obj *vid_ch = &ch->video;
1863 struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
1864 int ret;
1865
1866 if (timings->type != V4L2_DV_BT_656_1120) {
1867 vpif_dbg(2, debug, "Timing type not defined\n");
1868 return -EINVAL;
1869 }
1870
1871 /* Configure subdevice timings, if any */
1872 ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
1873 video, s_dv_timings, timings);
1874 if (ret == -ENOIOCTLCMD) {
1875 vpif_dbg(2, debug, "Custom DV timings not supported by "
1876 "subdevice\n");
1877 return -EINVAL;
1878 }
1879 if (ret < 0) {
1880 vpif_dbg(2, debug, "Error setting custom DV timings\n");
1881 return ret;
1882 }
1883
1884 if (!(timings->bt.width && timings->bt.height &&
1885 (timings->bt.hbackporch ||
1886 timings->bt.hfrontporch ||
1887 timings->bt.hsync) &&
1888 timings->bt.vfrontporch &&
1889 (timings->bt.vbackporch ||
1890 timings->bt.vsync))) {
1891 vpif_dbg(2, debug, "Timings for width, height, "
1892 "horizontal back porch, horizontal sync, "
1893 "horizontal front porch, vertical back porch, "
1894 "vertical sync and vertical back porch "
1895 "must be defined\n");
1896 return -EINVAL;
1897 }
1898
1899 *bt = timings->bt;
1900
1901 /* Configure video port timings */
1902
1903 std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
1904 bt->hsync - 8;
1905 std_info->sav2eav = bt->width;
1906
1907 std_info->l1 = 1;
1908 std_info->l3 = bt->vsync + bt->vbackporch + 1;
1909
1910 if (bt->interlaced) {
1911 if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
1912 std_info->vsize = bt->height * 2 +
1913 bt->vfrontporch + bt->vsync + bt->vbackporch +
1914 bt->il_vfrontporch + bt->il_vsync +
1915 bt->il_vbackporch;
1916 std_info->l5 = std_info->vsize/2 -
1917 (bt->vfrontporch - 1);
1918 std_info->l7 = std_info->vsize/2 + 1;
1919 std_info->l9 = std_info->l7 + bt->il_vsync +
1920 bt->il_vbackporch + 1;
1921 std_info->l11 = std_info->vsize -
1922 (bt->il_vfrontporch - 1);
1923 } else {
1924 vpif_dbg(2, debug, "Required timing values for "
1925 "interlaced BT format missing\n");
1926 return -EINVAL;
1927 }
1928 } else {
1929 std_info->vsize = bt->height + bt->vfrontporch +
1930 bt->vsync + bt->vbackporch;
1931 std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
1932 }
1933 strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
1934 std_info->width = bt->width;
1935 std_info->height = bt->height;
1936 std_info->frm_fmt = bt->interlaced ? 0 : 1;
1937 std_info->ycmux_mode = 0;
1938 std_info->capture_format = 0;
1939 std_info->vbi_supported = 0;
1940 std_info->hd_sd = 1;
1941 std_info->stdid = 0;
1942 std_info->dv_preset = V4L2_DV_INVALID;
1943
1944 vid_ch->stdid = 0;
1945 vid_ch->dv_preset = V4L2_DV_INVALID;
1946 return 0;
1947}
1948
1949/**
1950 * vpif_g_dv_timings() - G_DV_TIMINGS handler
1951 * @file: file ptr
1952 * @priv: file handle
1953 * @timings: digital video timings
1954 */
1955static int vpif_g_dv_timings(struct file *file, void *priv,
1956 struct v4l2_dv_timings *timings)
1957{
1958 struct vpif_fh *fh = priv;
1959 struct channel_obj *ch = fh->channel;
1960 struct video_obj *vid_ch = &ch->video;
1961 struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
1962
1963 timings->bt = *bt;
1964
1965 return 0;
1966}
1967
1968/*
1969 * vpif_g_chip_ident() - Identify the chip
1970 * @file: file ptr
1971 * @priv: file handle
1972 * @chip: chip identity
1973 *
1974 * Returns zero or -EINVAL if read operations fails.
1975 */
1976static int vpif_g_chip_ident(struct file *file, void *priv,
1977 struct v4l2_dbg_chip_ident *chip)
1978{
1979 chip->ident = V4L2_IDENT_NONE;
1980 chip->revision = 0;
1981 if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
1982 chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
1983 vpif_dbg(2, debug, "match_type is invalid.\n");
1984 return -EINVAL;
1985 }
1986
1987 return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
1988 g_chip_ident, chip);
1989}
1990
1991#ifdef CONFIG_VIDEO_ADV_DEBUG
1992/*
1993 * vpif_dbg_g_register() - Read register
1994 * @file: file ptr
1995 * @priv: file handle
1996 * @reg: register to be read
1997 *
1998 * Debugging only
1999 * Returns zero or -EINVAL if read operations fails.
2000 */
2001static int vpif_dbg_g_register(struct file *file, void *priv,
2002 struct v4l2_dbg_register *reg){
2003 struct vpif_fh *fh = priv;
2004 struct channel_obj *ch = fh->channel;
2005
2006 return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
2007 g_register, reg);
2008}
2009
2010/*
2011 * vpif_dbg_s_register() - Write to register
2012 * @file: file ptr
2013 * @priv: file handle
2014 * @reg: register to be modified
2015 *
2016 * Debugging only
2017 * Returns zero or -EINVAL if write operations fails.
2018 */
2019static int vpif_dbg_s_register(struct file *file, void *priv,
2020 struct v4l2_dbg_register *reg){
2021 struct vpif_fh *fh = priv;
2022 struct channel_obj *ch = fh->channel;
2023
2024 return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
2025 s_register, reg);
2026}
2027#endif
2028
2029/*
2030 * vpif_log_status() - Status information
2031 * @file: file ptr
2032 * @priv: file handle
2033 *
2034 * Returns zero.
2035 */
2036static int vpif_log_status(struct file *filep, void *priv)
2037{
2038 /* status for sub devices */
2039 v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
2040
2041 return 0;
2042}
2043
1810/* vpif capture ioctl operations */ 2044/* vpif capture ioctl operations */
1811static const struct v4l2_ioctl_ops vpif_ioctl_ops = { 2045static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
1812 .vidioc_querycap = vpif_querycap, 2046 .vidioc_querycap = vpif_querycap,
@@ -1829,6 +2063,18 @@ static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
1829 .vidioc_streamon = vpif_streamon, 2063 .vidioc_streamon = vpif_streamon,
1830 .vidioc_streamoff = vpif_streamoff, 2064 .vidioc_streamoff = vpif_streamoff,
1831 .vidioc_cropcap = vpif_cropcap, 2065 .vidioc_cropcap = vpif_cropcap,
2066 .vidioc_enum_dv_presets = vpif_enum_dv_presets,
2067 .vidioc_s_dv_preset = vpif_s_dv_preset,
2068 .vidioc_g_dv_preset = vpif_g_dv_preset,
2069 .vidioc_query_dv_preset = vpif_query_dv_preset,
2070 .vidioc_s_dv_timings = vpif_s_dv_timings,
2071 .vidioc_g_dv_timings = vpif_g_dv_timings,
2072 .vidioc_g_chip_ident = vpif_g_chip_ident,
2073#ifdef CONFIG_VIDEO_ADV_DEBUG
2074 .vidioc_g_register = vpif_dbg_g_register,
2075 .vidioc_s_register = vpif_dbg_s_register,
2076#endif
2077 .vidioc_log_status = vpif_log_status,
1832}; 2078};
1833 2079
1834/* vpif file operations */ 2080/* vpif file operations */
@@ -1836,7 +2082,7 @@ static struct v4l2_file_operations vpif_fops = {
1836 .owner = THIS_MODULE, 2082 .owner = THIS_MODULE,
1837 .open = vpif_open, 2083 .open = vpif_open,
1838 .release = vpif_release, 2084 .release = vpif_release,
1839 .ioctl = video_ioctl2, 2085 .unlocked_ioctl = video_ioctl2,
1840 .mmap = vpif_mmap, 2086 .mmap = vpif_mmap,
1841 .poll = vpif_poll 2087 .poll = vpif_poll
1842}; 2088};
@@ -1979,6 +2225,7 @@ static __init int vpif_probe(struct platform_device *pdev)
1979 common = &(ch->common[VPIF_VIDEO_INDEX]); 2225 common = &(ch->common[VPIF_VIDEO_INDEX]);
1980 spin_lock_init(&common->irqlock); 2226 spin_lock_init(&common->irqlock);
1981 mutex_init(&common->lock); 2227 mutex_init(&common->lock);
2228 ch->video_dev->lock = &common->lock;
1982 /* Initialize prio member of channel object */ 2229 /* Initialize prio member of channel object */
1983 v4l2_prio_init(&ch->prio); 2230 v4l2_prio_init(&ch->prio);
1984 err = video_register_device(ch->video_dev, 2231 err = video_register_device(ch->video_dev,
@@ -2026,9 +2273,9 @@ static __init int vpif_probe(struct platform_device *pdev)
2026 if (vpif_obj.sd[i]) 2273 if (vpif_obj.sd[i])
2027 vpif_obj.sd[i]->grp_id = 1 << i; 2274 vpif_obj.sd[i]->grp_id = 1 << i;
2028 } 2275 }
2029 v4l2_info(&vpif_obj.v4l2_dev, "DM646x VPIF Capture driver"
2030 " initialized\n");
2031 2276
2277 v4l2_info(&vpif_obj.v4l2_dev,
2278 "DM646x VPIF capture driver initialized\n");
2032 return 0; 2279 return 0;
2033 2280
2034probe_subdev_out: 2281probe_subdev_out:
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 4e12ec8cac6f..7a4196dfdce1 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -59,6 +59,8 @@ struct video_obj {
59 enum v4l2_field buf_field; 59 enum v4l2_field buf_field;
60 /* Currently selected or default standard */ 60 /* Currently selected or default standard */
61 v4l2_std_id stdid; 61 v4l2_std_id stdid;
62 u32 dv_preset;
63 struct v4l2_bt_timings bt_timings;
62 /* This is to track the last input that is passed to application */ 64 /* This is to track the last input that is passed to application */
63 u32 input_idx; 65 u32 input_idx;
64}; 66};
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 412c65d54fe1..cdf659abdc2a 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -38,6 +38,7 @@
38#include <media/adv7343.h> 38#include <media/adv7343.h>
39#include <media/v4l2-device.h> 39#include <media/v4l2-device.h>
40#include <media/v4l2-ioctl.h> 40#include <media/v4l2-ioctl.h>
41#include <media/v4l2-chip-ident.h>
41 42
42#include <mach/dm646x.h> 43#include <mach/dm646x.h>
43 44
@@ -84,17 +85,6 @@ static struct vpif_config_params config_params = {
84static struct vpif_device vpif_obj = { {NULL} }; 85static struct vpif_device vpif_obj = { {NULL} };
85static struct device *vpif_dev; 86static struct device *vpif_dev;
86 87
87static const struct vpif_channel_config_params ch_params[] = {
88 {
89 "NTSC", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
90 286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
91 },
92 {
93 "PAL", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
94 336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
95 },
96};
97
98/* 88/*
99 * vpif_uservirt_to_phys: This function is used to convert user 89 * vpif_uservirt_to_phys: This function is used to convert user
100 * space virtual address to physical address. 90 * space virtual address to physical address.
@@ -373,30 +363,54 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
373 return IRQ_HANDLED; 363 return IRQ_HANDLED;
374} 364}
375 365
376static int vpif_get_std_info(struct channel_obj *ch) 366static int vpif_update_std_info(struct channel_obj *ch)
377{ 367{
378 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
379 struct video_obj *vid_ch = &ch->video; 368 struct video_obj *vid_ch = &ch->video;
380 struct vpif_params *vpifparams = &ch->vpifparams; 369 struct vpif_params *vpifparams = &ch->vpifparams;
381 struct vpif_channel_config_params *std_info = &vpifparams->std_info; 370 struct vpif_channel_config_params *std_info = &vpifparams->std_info;
382 const struct vpif_channel_config_params *config; 371 const struct vpif_channel_config_params *config;
383 372
384 int index; 373 int i;
385
386 std_info->stdid = vid_ch->stdid;
387 if (!std_info->stdid)
388 return -1;
389 374
390 for (index = 0; index < ARRAY_SIZE(ch_params); index++) { 375 for (i = 0; i < vpif_ch_params_count; i++) {
391 config = &ch_params[index]; 376 config = &ch_params[i];
392 if (config->stdid & std_info->stdid) { 377 if (config->hd_sd == 0) {
393 memcpy(std_info, config, sizeof(*config)); 378 vpif_dbg(2, debug, "SD format\n");
394 break; 379 if (config->stdid & vid_ch->stdid) {
380 memcpy(std_info, config, sizeof(*config));
381 break;
382 }
383 } else {
384 vpif_dbg(2, debug, "HD format\n");
385 if (config->dv_preset == vid_ch->dv_preset) {
386 memcpy(std_info, config, sizeof(*config));
387 break;
388 }
395 } 389 }
396 } 390 }
397 391
398 if (index == ARRAY_SIZE(ch_params)) 392 if (i == vpif_ch_params_count) {
399 return -1; 393 vpif_dbg(1, debug, "Format not found\n");
394 return -EINVAL;
395 }
396
397 return 0;
398}
399
400static int vpif_update_resolution(struct channel_obj *ch)
401{
402 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
403 struct video_obj *vid_ch = &ch->video;
404 struct vpif_params *vpifparams = &ch->vpifparams;
405 struct vpif_channel_config_params *std_info = &vpifparams->std_info;
406
407 if (!vid_ch->stdid && !vid_ch->dv_preset && !vid_ch->bt_timings.height)
408 return -EINVAL;
409
410 if (vid_ch->stdid || vid_ch->dv_preset) {
411 if (vpif_update_std_info(ch))
412 return -EINVAL;
413 }
400 414
401 common->fmt.fmt.pix.width = std_info->width; 415 common->fmt.fmt.pix.width = std_info->width;
402 common->fmt.fmt.pix.height = std_info->height; 416 common->fmt.fmt.pix.height = std_info->height;
@@ -404,8 +418,8 @@ static int vpif_get_std_info(struct channel_obj *ch)
404 common->fmt.fmt.pix.width, common->fmt.fmt.pix.height); 418 common->fmt.fmt.pix.width, common->fmt.fmt.pix.height);
405 419
406 /* Set height and width paramateres */ 420 /* Set height and width paramateres */
407 ch->common[VPIF_VIDEO_INDEX].height = std_info->height; 421 common->height = std_info->height;
408 ch->common[VPIF_VIDEO_INDEX].width = std_info->width; 422 common->width = std_info->width;
409 423
410 return 0; 424 return 0;
411} 425}
@@ -516,10 +530,8 @@ static int vpif_check_format(struct channel_obj *ch,
516 else 530 else
517 sizeimage = config_params.channel_bufsize[ch->channel_id]; 531 sizeimage = config_params.channel_bufsize[ch->channel_id];
518 532
519 if (vpif_get_std_info(ch)) { 533 if (vpif_update_resolution(ch))
520 vpif_err("Error getting the standard info\n");
521 return -EINVAL; 534 return -EINVAL;
522 }
523 535
524 hpitch = pixfmt->bytesperline; 536 hpitch = pixfmt->bytesperline;
525 vpitch = sizeimage / (hpitch * 2); 537 vpitch = sizeimage / (hpitch * 2);
@@ -568,7 +580,10 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
568static int vpif_mmap(struct file *filep, struct vm_area_struct *vma) 580static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
569{ 581{
570 struct vpif_fh *fh = filep->private_data; 582 struct vpif_fh *fh = filep->private_data;
571 struct common_obj *common = &fh->channel->common[VPIF_VIDEO_INDEX]; 583 struct channel_obj *ch = fh->channel;
584 struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
585
586 vpif_dbg(2, debug, "vpif_mmap\n");
572 587
573 return videobuf_mmap_mapper(&common->buffer_queue, vma); 588 return videobuf_mmap_mapper(&common->buffer_queue, vma);
574} 589}
@@ -637,9 +652,6 @@ static int vpif_release(struct file *filep)
637 struct channel_obj *ch = fh->channel; 652 struct channel_obj *ch = fh->channel;
638 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; 653 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
639 654
640 if (mutex_lock_interruptible(&common->lock))
641 return -ERESTARTSYS;
642
643 /* if this instance is doing IO */ 655 /* if this instance is doing IO */
644 if (fh->io_allowed[VPIF_VIDEO_INDEX]) { 656 if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
645 /* Reset io_usrs member of channel object */ 657 /* Reset io_usrs member of channel object */
@@ -662,8 +674,6 @@ static int vpif_release(struct file *filep)
662 config_params.numbuffers[ch->channel_id]; 674 config_params.numbuffers[ch->channel_id];
663 } 675 }
664 676
665 mutex_unlock(&common->lock);
666
667 /* Decrement channel usrs counter */ 677 /* Decrement channel usrs counter */
668 atomic_dec(&ch->usrs); 678 atomic_dec(&ch->usrs);
669 /* If this file handle has initialize encoder device, reset it */ 679 /* If this file handle has initialize encoder device, reset it */
@@ -680,7 +690,12 @@ static int vpif_release(struct file *filep)
680} 690}
681 691
682/* functions implementing ioctls */ 692/* functions implementing ioctls */
683 693/**
694 * vpif_querycap() - QUERYCAP handler
695 * @file: file ptr
696 * @priv: file handle
697 * @cap: ptr to v4l2_capability structure
698 */
684static int vpif_querycap(struct file *file, void *priv, 699static int vpif_querycap(struct file *file, void *priv,
685 struct v4l2_capability *cap) 700 struct v4l2_capability *cap)
686{ 701{
@@ -722,17 +737,9 @@ static int vpif_g_fmt_vid_out(struct file *file, void *priv,
722 if (common->fmt.type != fmt->type) 737 if (common->fmt.type != fmt->type)
723 return -EINVAL; 738 return -EINVAL;
724 739
725 /* Fill in the information about format */ 740 if (vpif_update_resolution(ch))
726 if (mutex_lock_interruptible(&common->lock))
727 return -ERESTARTSYS;
728
729 if (vpif_get_std_info(ch)) {
730 vpif_err("Error getting the standard info\n");
731 return -EINVAL; 741 return -EINVAL;
732 }
733
734 *fmt = common->fmt; 742 *fmt = common->fmt;
735 mutex_unlock(&common->lock);
736 return 0; 743 return 0;
737} 744}
738 745
@@ -773,12 +780,7 @@ static int vpif_s_fmt_vid_out(struct file *file, void *priv,
773 /* store the pix format in the channel object */ 780 /* store the pix format in the channel object */
774 common->fmt.fmt.pix = *pixfmt; 781 common->fmt.fmt.pix = *pixfmt;
775 /* store the format in the channel object */ 782 /* store the format in the channel object */
776 if (mutex_lock_interruptible(&common->lock))
777 return -ERESTARTSYS;
778
779 common->fmt = *fmt; 783 common->fmt = *fmt;
780 mutex_unlock(&common->lock);
781
782 return 0; 784 return 0;
783} 785}
784 786
@@ -808,7 +810,6 @@ static int vpif_reqbufs(struct file *file, void *priv,
808 struct common_obj *common; 810 struct common_obj *common;
809 enum v4l2_field field; 811 enum v4l2_field field;
810 u8 index = 0; 812 u8 index = 0;
811 int ret = 0;
812 813
813 /* This file handle has not initialized the channel, 814 /* This file handle has not initialized the channel,
814 It is not allowed to do settings */ 815 It is not allowed to do settings */
@@ -826,18 +827,12 @@ static int vpif_reqbufs(struct file *file, void *priv,
826 index = VPIF_VIDEO_INDEX; 827 index = VPIF_VIDEO_INDEX;
827 828
828 common = &ch->common[index]; 829 common = &ch->common[index];
829 if (mutex_lock_interruptible(&common->lock))
830 return -ERESTARTSYS;
831 830
832 if (common->fmt.type != reqbuf->type) { 831 if (common->fmt.type != reqbuf->type)
833 ret = -EINVAL; 832 return -EINVAL;
834 goto reqbuf_exit;
835 }
836 833
837 if (0 != common->io_usrs) { 834 if (0 != common->io_usrs)
838 ret = -EBUSY; 835 return -EBUSY;
839 goto reqbuf_exit;
840 }
841 836
842 if (reqbuf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 837 if (reqbuf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
843 if (common->fmt.fmt.pix.field == V4L2_FIELD_ANY) 838 if (common->fmt.fmt.pix.field == V4L2_FIELD_ANY)
@@ -854,7 +849,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
854 &common->irqlock, 849 &common->irqlock,
855 reqbuf->type, field, 850 reqbuf->type, field,
856 sizeof(struct videobuf_buffer), fh, 851 sizeof(struct videobuf_buffer), fh,
857 NULL); 852 &common->lock);
858 853
859 /* Set io allowed member of file handle to TRUE */ 854 /* Set io allowed member of file handle to TRUE */
860 fh->io_allowed[index] = 1; 855 fh->io_allowed[index] = 1;
@@ -865,11 +860,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
865 INIT_LIST_HEAD(&common->dma_queue); 860 INIT_LIST_HEAD(&common->dma_queue);
866 861
867 /* Allocate buffers */ 862 /* Allocate buffers */
868 ret = videobuf_reqbufs(&common->buffer_queue, reqbuf); 863 return videobuf_reqbufs(&common->buffer_queue, reqbuf);
869
870reqbuf_exit:
871 mutex_unlock(&common->lock);
872 return ret;
873} 864}
874 865
875static int vpif_querybuf(struct file *file, void *priv, 866static int vpif_querybuf(struct file *file, void *priv,
@@ -990,22 +981,19 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
990 } 981 }
991 982
992 /* Call encoder subdevice function to set the standard */ 983 /* Call encoder subdevice function to set the standard */
993 if (mutex_lock_interruptible(&common->lock))
994 return -ERESTARTSYS;
995
996 ch->video.stdid = *std_id; 984 ch->video.stdid = *std_id;
985 ch->video.dv_preset = V4L2_DV_INVALID;
986 memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
987
997 /* Get the information about the standard */ 988 /* Get the information about the standard */
998 if (vpif_get_std_info(ch)) { 989 if (vpif_update_resolution(ch))
999 vpif_err("Error getting the standard info\n");
1000 return -EINVAL; 990 return -EINVAL;
1001 }
1002 991
1003 if ((ch->vpifparams.std_info.width * 992 if ((ch->vpifparams.std_info.width *
1004 ch->vpifparams.std_info.height * 2) > 993 ch->vpifparams.std_info.height * 2) >
1005 config_params.channel_bufsize[ch->channel_id]) { 994 config_params.channel_bufsize[ch->channel_id]) {
1006 vpif_err("invalid std for this size\n"); 995 vpif_err("invalid std for this size\n");
1007 ret = -EINVAL; 996 return -EINVAL;
1008 goto s_std_exit;
1009 } 997 }
1010 998
1011 common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width; 999 common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width;
@@ -1016,16 +1004,13 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
1016 s_std_output, *std_id); 1004 s_std_output, *std_id);
1017 if (ret < 0) { 1005 if (ret < 0) {
1018 vpif_err("Failed to set output standard\n"); 1006 vpif_err("Failed to set output standard\n");
1019 goto s_std_exit; 1007 return ret;
1020 } 1008 }
1021 1009
1022 ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, core, 1010 ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, core,
1023 s_std, *std_id); 1011 s_std, *std_id);
1024 if (ret < 0) 1012 if (ret < 0)
1025 vpif_err("Failed to set standard for sub devices\n"); 1013 vpif_err("Failed to set standard for sub devices\n");
1026
1027s_std_exit:
1028 mutex_unlock(&common->lock);
1029 return ret; 1014 return ret;
1030} 1015}
1031 1016
@@ -1090,21 +1075,17 @@ static int vpif_streamon(struct file *file, void *priv,
1090 if (ret < 0) 1075 if (ret < 0)
1091 return ret; 1076 return ret;
1092 1077
1093 /* Call videobuf_streamon to start streaming in videobuf */ 1078 /* Call videobuf_streamon to start streaming in videobuf */
1094 ret = videobuf_streamon(&common->buffer_queue); 1079 ret = videobuf_streamon(&common->buffer_queue);
1095 if (ret < 0) { 1080 if (ret < 0) {
1096 vpif_err("videobuf_streamon\n"); 1081 vpif_err("videobuf_streamon\n");
1097 return ret; 1082 return ret;
1098 } 1083 }
1099 1084
1100 if (mutex_lock_interruptible(&common->lock))
1101 return -ERESTARTSYS;
1102
1103 /* If buffer queue is empty, return error */ 1085 /* If buffer queue is empty, return error */
1104 if (list_empty(&common->dma_queue)) { 1086 if (list_empty(&common->dma_queue)) {
1105 vpif_err("buffer queue is empty\n"); 1087 vpif_err("buffer queue is empty\n");
1106 ret = -EIO; 1088 return -EIO;
1107 goto streamon_exit;
1108 } 1089 }
1109 1090
1110 /* Get the next frame from the buffer queue */ 1091 /* Get the next frame from the buffer queue */
@@ -1130,8 +1111,7 @@ static int vpif_streamon(struct file *file, void *priv,
1130 || (!ch->vpifparams.std_info.frm_fmt 1111 || (!ch->vpifparams.std_info.frm_fmt
1131 && (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) { 1112 && (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
1132 vpif_err("conflict in field format and std format\n"); 1113 vpif_err("conflict in field format and std format\n");
1133 ret = -EINVAL; 1114 return -EINVAL;
1134 goto streamon_exit;
1135 } 1115 }
1136 1116
1137 /* clock settings */ 1117 /* clock settings */
@@ -1140,13 +1120,13 @@ static int vpif_streamon(struct file *file, void *priv,
1140 ch->vpifparams.std_info.hd_sd); 1120 ch->vpifparams.std_info.hd_sd);
1141 if (ret < 0) { 1121 if (ret < 0) {
1142 vpif_err("can't set clock\n"); 1122 vpif_err("can't set clock\n");
1143 goto streamon_exit; 1123 return ret;
1144 } 1124 }
1145 1125
1146 /* set the parameters and addresses */ 1126 /* set the parameters and addresses */
1147 ret = vpif_set_video_params(vpif, ch->channel_id + 2); 1127 ret = vpif_set_video_params(vpif, ch->channel_id + 2);
1148 if (ret < 0) 1128 if (ret < 0)
1149 goto streamon_exit; 1129 return ret;
1150 1130
1151 common->started = ret; 1131 common->started = ret;
1152 vpif_config_addr(ch, ret); 1132 vpif_config_addr(ch, ret);
@@ -1171,9 +1151,6 @@ static int vpif_streamon(struct file *file, void *priv,
1171 } 1151 }
1172 channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; 1152 channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
1173 } 1153 }
1174
1175streamon_exit:
1176 mutex_unlock(&common->lock);
1177 return ret; 1154 return ret;
1178} 1155}
1179 1156
@@ -1199,9 +1176,6 @@ static int vpif_streamoff(struct file *file, void *priv,
1199 return -EINVAL; 1176 return -EINVAL;
1200 } 1177 }
1201 1178
1202 if (mutex_lock_interruptible(&common->lock))
1203 return -ERESTARTSYS;
1204
1205 if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 1179 if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
1206 /* disable channel */ 1180 /* disable channel */
1207 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { 1181 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
@@ -1216,8 +1190,6 @@ static int vpif_streamoff(struct file *file, void *priv,
1216 } 1190 }
1217 1191
1218 common->started = 0; 1192 common->started = 0;
1219 mutex_unlock(&common->lock);
1220
1221 return videobuf_streamoff(&common->buffer_queue); 1193 return videobuf_streamoff(&common->buffer_queue);
1222} 1194}
1223 1195
@@ -1264,13 +1236,9 @@ static int vpif_s_output(struct file *file, void *priv, unsigned int i)
1264 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; 1236 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
1265 int ret = 0; 1237 int ret = 0;
1266 1238
1267 if (mutex_lock_interruptible(&common->lock))
1268 return -ERESTARTSYS;
1269
1270 if (common->started) { 1239 if (common->started) {
1271 vpif_err("Streaming in progress\n"); 1240 vpif_err("Streaming in progress\n");
1272 ret = -EBUSY; 1241 return -EBUSY;
1273 goto s_output_exit;
1274 } 1242 }
1275 1243
1276 ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video, 1244 ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
@@ -1280,9 +1248,6 @@ static int vpif_s_output(struct file *file, void *priv, unsigned int i)
1280 vpif_err("Failed to set output standard\n"); 1248 vpif_err("Failed to set output standard\n");
1281 1249
1282 vid_ch->output_id = i; 1250 vid_ch->output_id = i;
1283
1284s_output_exit:
1285 mutex_unlock(&common->lock);
1286 return ret; 1251 return ret;
1287} 1252}
1288 1253
@@ -1315,6 +1280,287 @@ static int vpif_s_priority(struct file *file, void *priv, enum v4l2_priority p)
1315 return v4l2_prio_change(&ch->prio, &fh->prio, p); 1280 return v4l2_prio_change(&ch->prio, &fh->prio, p);
1316} 1281}
1317 1282
1283/**
1284 * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
1285 * @file: file ptr
1286 * @priv: file handle
1287 * @preset: input preset
1288 */
1289static int vpif_enum_dv_presets(struct file *file, void *priv,
1290 struct v4l2_dv_enum_preset *preset)
1291{
1292 struct vpif_fh *fh = priv;
1293 struct channel_obj *ch = fh->channel;
1294 struct video_obj *vid_ch = &ch->video;
1295
1296 return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
1297 video, enum_dv_presets, preset);
1298}
1299
1300/**
1301 * vpif_s_dv_presets() - S_DV_PRESETS handler
1302 * @file: file ptr
1303 * @priv: file handle
1304 * @preset: input preset
1305 */
1306static int vpif_s_dv_preset(struct file *file, void *priv,
1307 struct v4l2_dv_preset *preset)
1308{
1309 struct vpif_fh *fh = priv;
1310 struct channel_obj *ch = fh->channel;
1311 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
1312 struct video_obj *vid_ch = &ch->video;
1313 int ret = 0;
1314
1315 if (common->started) {
1316 vpif_dbg(1, debug, "streaming in progress\n");
1317 return -EBUSY;
1318 }
1319
1320 ret = v4l2_prio_check(&ch->prio, fh->prio);
1321 if (ret != 0)
1322 return ret;
1323
1324 fh->initialized = 1;
1325
1326 /* Call encoder subdevice function to set the standard */
1327 if (mutex_lock_interruptible(&common->lock))
1328 return -ERESTARTSYS;
1329
1330 ch->video.dv_preset = preset->preset;
1331 ch->video.stdid = V4L2_STD_UNKNOWN;
1332 memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
1333
1334 /* Get the information about the standard */
1335 if (vpif_update_resolution(ch)) {
1336 ret = -EINVAL;
1337 } else {
1338 /* Configure the default format information */
1339 vpif_config_format(ch);
1340
1341 ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
1342 video, s_dv_preset, preset);
1343 }
1344
1345 mutex_unlock(&common->lock);
1346
1347 return ret;
1348}
1349/**
1350 * vpif_g_dv_presets() - G_DV_PRESETS handler
1351 * @file: file ptr
1352 * @priv: file handle
1353 * @preset: input preset
1354 */
1355static int vpif_g_dv_preset(struct file *file, void *priv,
1356 struct v4l2_dv_preset *preset)
1357{
1358 struct vpif_fh *fh = priv;
1359 struct channel_obj *ch = fh->channel;
1360
1361 preset->preset = ch->video.dv_preset;
1362
1363 return 0;
1364}
1365/**
1366 * vpif_s_dv_timings() - S_DV_TIMINGS handler
1367 * @file: file ptr
1368 * @priv: file handle
1369 * @timings: digital video timings
1370 */
1371static int vpif_s_dv_timings(struct file *file, void *priv,
1372 struct v4l2_dv_timings *timings)
1373{
1374 struct vpif_fh *fh = priv;
1375 struct channel_obj *ch = fh->channel;
1376 struct vpif_params *vpifparams = &ch->vpifparams;
1377 struct vpif_channel_config_params *std_info = &vpifparams->std_info;
1378 struct video_obj *vid_ch = &ch->video;
1379 struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
1380 int ret;
1381
1382 if (timings->type != V4L2_DV_BT_656_1120) {
1383 vpif_dbg(2, debug, "Timing type not defined\n");
1384 return -EINVAL;
1385 }
1386
1387 /* Configure subdevice timings, if any */
1388 ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
1389 video, s_dv_timings, timings);
1390 if (ret == -ENOIOCTLCMD) {
1391 vpif_dbg(2, debug, "Custom DV timings not supported by "
1392 "subdevice\n");
1393 return -EINVAL;
1394 }
1395 if (ret < 0) {
1396 vpif_dbg(2, debug, "Error setting custom DV timings\n");
1397 return ret;
1398 }
1399
1400 if (!(timings->bt.width && timings->bt.height &&
1401 (timings->bt.hbackporch ||
1402 timings->bt.hfrontporch ||
1403 timings->bt.hsync) &&
1404 timings->bt.vfrontporch &&
1405 (timings->bt.vbackporch ||
1406 timings->bt.vsync))) {
1407 vpif_dbg(2, debug, "Timings for width, height, "
1408 "horizontal back porch, horizontal sync, "
1409 "horizontal front porch, vertical back porch, "
1410 "vertical sync and vertical back porch "
1411 "must be defined\n");
1412 return -EINVAL;
1413 }
1414
1415 *bt = timings->bt;
1416
1417 /* Configure video port timings */
1418
1419 std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
1420 bt->hsync - 8;
1421 std_info->sav2eav = bt->width;
1422
1423 std_info->l1 = 1;
1424 std_info->l3 = bt->vsync + bt->vbackporch + 1;
1425
1426 if (bt->interlaced) {
1427 if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
1428 std_info->vsize = bt->height * 2 +
1429 bt->vfrontporch + bt->vsync + bt->vbackporch +
1430 bt->il_vfrontporch + bt->il_vsync +
1431 bt->il_vbackporch;
1432 std_info->l5 = std_info->vsize/2 -
1433 (bt->vfrontporch - 1);
1434 std_info->l7 = std_info->vsize/2 + 1;
1435 std_info->l9 = std_info->l7 + bt->il_vsync +
1436 bt->il_vbackporch + 1;
1437 std_info->l11 = std_info->vsize -
1438 (bt->il_vfrontporch - 1);
1439 } else {
1440 vpif_dbg(2, debug, "Required timing values for "
1441 "interlaced BT format missing\n");
1442 return -EINVAL;
1443 }
1444 } else {
1445 std_info->vsize = bt->height + bt->vfrontporch +
1446 bt->vsync + bt->vbackporch;
1447 std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
1448 }
1449 strncpy(std_info->name, "Custom timings BT656/1120",
1450 VPIF_MAX_NAME);
1451 std_info->width = bt->width;
1452 std_info->height = bt->height;
1453 std_info->frm_fmt = bt->interlaced ? 0 : 1;
1454 std_info->ycmux_mode = 0;
1455 std_info->capture_format = 0;
1456 std_info->vbi_supported = 0;
1457 std_info->hd_sd = 1;
1458 std_info->stdid = 0;
1459 std_info->dv_preset = V4L2_DV_INVALID;
1460
1461 vid_ch->stdid = 0;
1462 vid_ch->dv_preset = V4L2_DV_INVALID;
1463
1464 return 0;
1465}
1466
1467/**
1468 * vpif_g_dv_timings() - G_DV_TIMINGS handler
1469 * @file: file ptr
1470 * @priv: file handle
1471 * @timings: digital video timings
1472 */
1473static int vpif_g_dv_timings(struct file *file, void *priv,
1474 struct v4l2_dv_timings *timings)
1475{
1476 struct vpif_fh *fh = priv;
1477 struct channel_obj *ch = fh->channel;
1478 struct video_obj *vid_ch = &ch->video;
1479 struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
1480
1481 timings->bt = *bt;
1482
1483 return 0;
1484}
1485
1486/*
1487 * vpif_g_chip_ident() - Identify the chip
1488 * @file: file ptr
1489 * @priv: file handle
1490 * @chip: chip identity
1491 *
1492 * Returns zero or -EINVAL if read operations fails.
1493 */
1494static int vpif_g_chip_ident(struct file *file, void *priv,
1495 struct v4l2_dbg_chip_ident *chip)
1496{
1497 chip->ident = V4L2_IDENT_NONE;
1498 chip->revision = 0;
1499 if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
1500 chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
1501 vpif_dbg(2, debug, "match_type is invalid.\n");
1502 return -EINVAL;
1503 }
1504
1505 return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
1506 g_chip_ident, chip);
1507}
1508
1509#ifdef CONFIG_VIDEO_ADV_DEBUG
1510/*
1511 * vpif_dbg_g_register() - Read register
1512 * @file: file ptr
1513 * @priv: file handle
1514 * @reg: register to be read
1515 *
1516 * Debugging only
1517 * Returns zero or -EINVAL if read operations fails.
1518 */
1519static int vpif_dbg_g_register(struct file *file, void *priv,
1520 struct v4l2_dbg_register *reg){
1521 struct vpif_fh *fh = priv;
1522 struct channel_obj *ch = fh->channel;
1523 struct video_obj *vid_ch = &ch->video;
1524
1525 return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
1526 g_register, reg);
1527}
1528
1529/*
1530 * vpif_dbg_s_register() - Write to register
1531 * @file: file ptr
1532 * @priv: file handle
1533 * @reg: register to be modified
1534 *
1535 * Debugging only
1536 * Returns zero or -EINVAL if write operations fails.
1537 */
1538static int vpif_dbg_s_register(struct file *file, void *priv,
1539 struct v4l2_dbg_register *reg){
1540 struct vpif_fh *fh = priv;
1541 struct channel_obj *ch = fh->channel;
1542 struct video_obj *vid_ch = &ch->video;
1543
1544 return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
1545 s_register, reg);
1546}
1547#endif
1548
1549/*
1550 * vpif_log_status() - Status information
1551 * @file: file ptr
1552 * @priv: file handle
1553 *
1554 * Returns zero.
1555 */
1556static int vpif_log_status(struct file *filep, void *priv)
1557{
1558 /* status for sub devices */
1559 v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
1560
1561 return 0;
1562}
1563
1318/* vpif display ioctl operations */ 1564/* vpif display ioctl operations */
1319static const struct v4l2_ioctl_ops vpif_ioctl_ops = { 1565static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
1320 .vidioc_querycap = vpif_querycap, 1566 .vidioc_querycap = vpif_querycap,
@@ -1336,13 +1582,24 @@ static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
1336 .vidioc_s_output = vpif_s_output, 1582 .vidioc_s_output = vpif_s_output,
1337 .vidioc_g_output = vpif_g_output, 1583 .vidioc_g_output = vpif_g_output,
1338 .vidioc_cropcap = vpif_cropcap, 1584 .vidioc_cropcap = vpif_cropcap,
1585 .vidioc_enum_dv_presets = vpif_enum_dv_presets,
1586 .vidioc_s_dv_preset = vpif_s_dv_preset,
1587 .vidioc_g_dv_preset = vpif_g_dv_preset,
1588 .vidioc_s_dv_timings = vpif_s_dv_timings,
1589 .vidioc_g_dv_timings = vpif_g_dv_timings,
1590 .vidioc_g_chip_ident = vpif_g_chip_ident,
1591#ifdef CONFIG_VIDEO_ADV_DEBUG
1592 .vidioc_g_register = vpif_dbg_g_register,
1593 .vidioc_s_register = vpif_dbg_s_register,
1594#endif
1595 .vidioc_log_status = vpif_log_status,
1339}; 1596};
1340 1597
1341static const struct v4l2_file_operations vpif_fops = { 1598static const struct v4l2_file_operations vpif_fops = {
1342 .owner = THIS_MODULE, 1599 .owner = THIS_MODULE,
1343 .open = vpif_open, 1600 .open = vpif_open,
1344 .release = vpif_release, 1601 .release = vpif_release,
1345 .ioctl = video_ioctl2, 1602 .unlocked_ioctl = video_ioctl2,
1346 .mmap = vpif_mmap, 1603 .mmap = vpif_mmap,
1347 .poll = vpif_poll 1604 .poll = vpif_poll
1348}; 1605};
@@ -1526,6 +1783,7 @@ static __init int vpif_probe(struct platform_device *pdev)
1526 v4l2_prio_init(&ch->prio); 1783 v4l2_prio_init(&ch->prio);
1527 ch->common[VPIF_VIDEO_INDEX].fmt.type = 1784 ch->common[VPIF_VIDEO_INDEX].fmt.type =
1528 V4L2_BUF_TYPE_VIDEO_OUTPUT; 1785 V4L2_BUF_TYPE_VIDEO_OUTPUT;
1786 ch->video_dev->lock = &common->lock;
1529 1787
1530 /* register video device */ 1788 /* register video device */
1531 vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n", 1789 vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
@@ -1565,6 +1823,8 @@ static __init int vpif_probe(struct platform_device *pdev)
1565 vpif_obj.sd[i]->grp_id = 1 << i; 1823 vpif_obj.sd[i]->grp_id = 1 << i;
1566 } 1824 }
1567 1825
1826 v4l2_info(&vpif_obj.v4l2_dev,
1827 "DM646x VPIF display driver initialized\n");
1568 return 0; 1828 return 0;
1569 1829
1570probe_subdev_out: 1830probe_subdev_out:
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index a2a7cd166bbf..b53aaa883075 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -67,6 +67,8 @@ struct video_obj {
67 * most recent displayed frame only */ 67 * most recent displayed frame only */
68 v4l2_std_id stdid; /* Currently selected or default 68 v4l2_std_id stdid; /* Currently selected or default
69 * standard */ 69 * standard */
70 u32 dv_preset;
71 struct v4l2_bt_timings bt_timings;
70 u32 output_id; /* Current output id */ 72 u32 output_id; /* Current output id */
71}; 73};
72 74
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 099d5df8c572..87f77a34eeab 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -33,6 +33,7 @@
33#include <media/saa7115.h> 33#include <media/saa7115.h>
34#include <media/tvp5150.h> 34#include <media/tvp5150.h>
35#include <media/tvaudio.h> 35#include <media/tvaudio.h>
36#include <media/mt9v011.h>
36#include <media/i2c-addr.h> 37#include <media/i2c-addr.h>
37#include <media/tveeprom.h> 38#include <media/tveeprom.h>
38#include <media/v4l2-common.h> 39#include <media/v4l2-common.h>
@@ -1917,11 +1918,6 @@ static unsigned short tvp5150_addrs[] = {
1917 I2C_CLIENT_END 1918 I2C_CLIENT_END
1918}; 1919};
1919 1920
1920static unsigned short mt9v011_addrs[] = {
1921 0xba >> 1,
1922 I2C_CLIENT_END
1923};
1924
1925static unsigned short msp3400_addrs[] = { 1921static unsigned short msp3400_addrs[] = {
1926 0x80 >> 1, 1922 0x80 >> 1,
1927 0x88 >> 1, 1923 0x88 >> 1,
@@ -2437,6 +2433,7 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
2437 dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW; 2433 dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW;
2438 dev->init_data.get_key = em28xx_get_key_em_haup; 2434 dev->init_data.get_key = em28xx_get_key_em_haup;
2439 dev->init_data.name = "i2c IR (EM2840 Hauppauge)"; 2435 dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
2436 break;
2440 case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE: 2437 case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
2441 dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE; 2438 dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
2442 dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe; 2439 dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
@@ -2623,11 +2620,17 @@ void em28xx_card_setup(struct em28xx *dev)
2623 "tvp5150", 0, tvp5150_addrs); 2620 "tvp5150", 0, tvp5150_addrs);
2624 2621
2625 if (dev->em28xx_sensor == EM28XX_MT9V011) { 2622 if (dev->em28xx_sensor == EM28XX_MT9V011) {
2623 struct mt9v011_platform_data pdata;
2624 struct i2c_board_info mt9v011_info = {
2625 .type = "mt9v011",
2626 .addr = 0xba >> 1,
2627 .platform_data = &pdata,
2628 };
2626 struct v4l2_subdev *sd; 2629 struct v4l2_subdev *sd;
2627 2630
2628 sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, 2631 pdata.xtal = dev->sensor_xtal;
2629 &dev->i2c_adap, "mt9v011", 0, mt9v011_addrs); 2632 sd = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
2630 v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal); 2633 &mt9v011_info, NULL);
2631 } 2634 }
2632 2635
2633 2636
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index cc77d144df3c..bf66189cb26d 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -59,31 +59,7 @@
59/*****************************************************************************/ 59/*****************************************************************************/
60 60
61static const struct usb_device_id et61x251_id_table[] = { 61static const struct usb_device_id et61x251_id_table[] = {
62 { USB_DEVICE(0x102c, 0x6151), },
63 { USB_DEVICE(0x102c, 0x6251), }, 62 { USB_DEVICE(0x102c, 0x6251), },
64 { USB_DEVICE(0x102c, 0x6253), },
65 { USB_DEVICE(0x102c, 0x6254), },
66 { USB_DEVICE(0x102c, 0x6255), },
67 { USB_DEVICE(0x102c, 0x6256), },
68 { USB_DEVICE(0x102c, 0x6257), },
69 { USB_DEVICE(0x102c, 0x6258), },
70 { USB_DEVICE(0x102c, 0x6259), },
71 { USB_DEVICE(0x102c, 0x625a), },
72 { USB_DEVICE(0x102c, 0x625b), },
73 { USB_DEVICE(0x102c, 0x625c), },
74 { USB_DEVICE(0x102c, 0x625d), },
75 { USB_DEVICE(0x102c, 0x625e), },
76 { USB_DEVICE(0x102c, 0x625f), },
77 { USB_DEVICE(0x102c, 0x6260), },
78 { USB_DEVICE(0x102c, 0x6261), },
79 { USB_DEVICE(0x102c, 0x6262), },
80 { USB_DEVICE(0x102c, 0x6263), },
81 { USB_DEVICE(0x102c, 0x6264), },
82 { USB_DEVICE(0x102c, 0x6265), },
83 { USB_DEVICE(0x102c, 0x6266), },
84 { USB_DEVICE(0x102c, 0x6267), },
85 { USB_DEVICE(0x102c, 0x6268), },
86 { USB_DEVICE(0x102c, 0x6269), },
87 { } 63 { }
88}; 64};
89 65
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 629043933501..a09c4709d613 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -276,7 +276,7 @@ static const struct sd_desc sd_desc = {
276}; 276};
277 277
278/* -- module initialisation -- */ 278/* -- module initialisation -- */
279static const __devinitdata struct usb_device_id device_table[] = { 279static const struct usb_device_id device_table[] = {
280 {USB_DEVICE(0x04a5, 0x3035)}, 280 {USB_DEVICE(0x04a5, 0x3035)},
281 {} 281 {}
282}; 282};
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 1eacb6c7926d..8b398493f96b 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1040,14 +1040,14 @@ static const struct sd_desc sd_desc = {
1040}; 1040};
1041 1041
1042/* -- module initialisation -- */ 1042/* -- module initialisation -- */
1043static const struct usb_device_id device_table[] __devinitconst = { 1043static const struct usb_device_id device_table[] = {
1044 {USB_DEVICE(0x0572, 0x0041)}, 1044 {USB_DEVICE(0x0572, 0x0041)},
1045 {} 1045 {}
1046}; 1046};
1047MODULE_DEVICE_TABLE(usb, device_table); 1047MODULE_DEVICE_TABLE(usb, device_table);
1048 1048
1049/* -- device connect -- */ 1049/* -- device connect -- */
1050static int __devinit sd_probe(struct usb_interface *intf, 1050static int sd_probe(struct usb_interface *intf,
1051 const struct usb_device_id *id) 1051 const struct usb_device_id *id)
1052{ 1052{
1053 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 1053 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index c1ae05f4661f..4bf2cab98d64 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -2088,7 +2088,7 @@ static const struct sd_desc sd_desc = {
2088}; 2088};
2089 2089
2090/* -- module initialisation -- */ 2090/* -- module initialisation -- */
2091static const __devinitdata struct usb_device_id device_table[] = { 2091static const struct usb_device_id device_table[] = {
2092 {USB_DEVICE(0x0553, 0x0002)}, 2092 {USB_DEVICE(0x0553, 0x0002)},
2093 {USB_DEVICE(0x0813, 0x0001)}, 2093 {USB_DEVICE(0x0813, 0x0001)},
2094 {} 2094 {}
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index a594b36d6199..4b2c483fce6f 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -864,7 +864,7 @@ static const struct sd_desc sd_desc = {
864}; 864};
865 865
866/* -- module initialisation -- */ 866/* -- module initialisation -- */
867static const struct usb_device_id device_table[] __devinitconst = { 867static const struct usb_device_id device_table[] = {
868 {USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106}, 868 {USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
869#if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE 869#if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE
870 {USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX}, 870 {USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
@@ -875,7 +875,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
875MODULE_DEVICE_TABLE(usb, device_table); 875MODULE_DEVICE_TABLE(usb, device_table);
876 876
877/* -- device connect -- */ 877/* -- device connect -- */
878static int __devinit sd_probe(struct usb_interface *intf, 878static int sd_probe(struct usb_interface *intf,
879 const struct usb_device_id *id) 879 const struct usb_device_id *id)
880{ 880{
881 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 881 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index d78226455d1f..987b4b69d7ab 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -229,7 +229,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
229} 229}
230 230
231/* Table of supported USB devices */ 231/* Table of supported USB devices */
232static const __devinitdata struct usb_device_id device_table[] = { 232static const struct usb_device_id device_table[] = {
233 {USB_DEVICE(0x04cb, 0x0104)}, 233 {USB_DEVICE(0x04cb, 0x0104)},
234 {USB_DEVICE(0x04cb, 0x0109)}, 234 {USB_DEVICE(0x04cb, 0x0109)},
235 {USB_DEVICE(0x04cb, 0x010b)}, 235 {USB_DEVICE(0x04cb, 0x010b)},
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index b05bec7321b5..99083038cec3 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -488,7 +488,7 @@ static void sd_callback(struct gspca_dev *gspca_dev)
488 488
489/*=================== USB driver structure initialisation ==================*/ 489/*=================== USB driver structure initialisation ==================*/
490 490
491static const __devinitdata struct usb_device_id device_table[] = { 491static const struct usb_device_id device_table[] = {
492 {USB_DEVICE(0x05e3, 0x0503)}, 492 {USB_DEVICE(0x05e3, 0x0503)},
493 {USB_DEVICE(0x05e3, 0xf191)}, 493 {USB_DEVICE(0x05e3, 0xf191)},
494 {} 494 {}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 442970073e8a..f21f2a258ae0 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -55,7 +55,7 @@ MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
55MODULE_DESCRIPTION("GSPCA USB Camera Driver"); 55MODULE_DESCRIPTION("GSPCA USB Camera Driver");
56MODULE_LICENSE("GPL"); 56MODULE_LICENSE("GPL");
57 57
58#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 11, 0) 58#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 12, 0)
59 59
60#ifdef GSPCA_DEBUG 60#ifdef GSPCA_DEBUG
61int gspca_debug = D_ERR | D_PROBE; 61int gspca_debug = D_ERR | D_PROBE;
@@ -508,8 +508,8 @@ static int gspca_is_compressed(__u32 format)
508 return 0; 508 return 0;
509} 509}
510 510
511static int frame_alloc(struct gspca_dev *gspca_dev, 511static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
512 unsigned int count) 512 enum v4l2_memory memory, unsigned int count)
513{ 513{
514 struct gspca_frame *frame; 514 struct gspca_frame *frame;
515 unsigned int frsz; 515 unsigned int frsz;
@@ -519,7 +519,6 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
519 frsz = gspca_dev->cam.cam_mode[i].sizeimage; 519 frsz = gspca_dev->cam.cam_mode[i].sizeimage;
520 PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz); 520 PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
521 frsz = PAGE_ALIGN(frsz); 521 frsz = PAGE_ALIGN(frsz);
522 gspca_dev->frsz = frsz;
523 if (count >= GSPCA_MAX_FRAMES) 522 if (count >= GSPCA_MAX_FRAMES)
524 count = GSPCA_MAX_FRAMES - 1; 523 count = GSPCA_MAX_FRAMES - 1;
525 gspca_dev->frbuf = vmalloc_32(frsz * count); 524 gspca_dev->frbuf = vmalloc_32(frsz * count);
@@ -527,6 +526,9 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
527 err("frame alloc failed"); 526 err("frame alloc failed");
528 return -ENOMEM; 527 return -ENOMEM;
529 } 528 }
529 gspca_dev->capt_file = file;
530 gspca_dev->memory = memory;
531 gspca_dev->frsz = frsz;
530 gspca_dev->nframes = count; 532 gspca_dev->nframes = count;
531 for (i = 0; i < count; i++) { 533 for (i = 0; i < count; i++) {
532 frame = &gspca_dev->frame[i]; 534 frame = &gspca_dev->frame[i];
@@ -535,7 +537,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev,
535 frame->v4l2_buf.flags = 0; 537 frame->v4l2_buf.flags = 0;
536 frame->v4l2_buf.field = V4L2_FIELD_NONE; 538 frame->v4l2_buf.field = V4L2_FIELD_NONE;
537 frame->v4l2_buf.length = frsz; 539 frame->v4l2_buf.length = frsz;
538 frame->v4l2_buf.memory = gspca_dev->memory; 540 frame->v4l2_buf.memory = memory;
539 frame->v4l2_buf.sequence = 0; 541 frame->v4l2_buf.sequence = 0;
540 frame->data = gspca_dev->frbuf + i * frsz; 542 frame->data = gspca_dev->frbuf + i * frsz;
541 frame->v4l2_buf.m.offset = i * frsz; 543 frame->v4l2_buf.m.offset = i * frsz;
@@ -558,6 +560,9 @@ static void frame_free(struct gspca_dev *gspca_dev)
558 gspca_dev->frame[i].data = NULL; 560 gspca_dev->frame[i].data = NULL;
559 } 561 }
560 gspca_dev->nframes = 0; 562 gspca_dev->nframes = 0;
563 gspca_dev->frsz = 0;
564 gspca_dev->capt_file = NULL;
565 gspca_dev->memory = GSPCA_MEMORY_NO;
561} 566}
562 567
563static void destroy_urbs(struct gspca_dev *gspca_dev) 568static void destroy_urbs(struct gspca_dev *gspca_dev)
@@ -1210,29 +1215,15 @@ static void gspca_release(struct video_device *vfd)
1210static int dev_open(struct file *file) 1215static int dev_open(struct file *file)
1211{ 1216{
1212 struct gspca_dev *gspca_dev; 1217 struct gspca_dev *gspca_dev;
1213 int ret;
1214 1218
1215 PDEBUG(D_STREAM, "[%s] open", current->comm); 1219 PDEBUG(D_STREAM, "[%s] open", current->comm);
1216 gspca_dev = (struct gspca_dev *) video_devdata(file); 1220 gspca_dev = (struct gspca_dev *) video_devdata(file);
1217 if (mutex_lock_interruptible(&gspca_dev->queue_lock)) 1221 if (!gspca_dev->present)
1218 return -ERESTARTSYS; 1222 return -ENODEV;
1219 if (!gspca_dev->present) {
1220 ret = -ENODEV;
1221 goto out;
1222 }
1223
1224 if (gspca_dev->users > 4) { /* (arbitrary value) */
1225 ret = -EBUSY;
1226 goto out;
1227 }
1228 1223
1229 /* protect the subdriver against rmmod */ 1224 /* protect the subdriver against rmmod */
1230 if (!try_module_get(gspca_dev->module)) { 1225 if (!try_module_get(gspca_dev->module))
1231 ret = -ENODEV; 1226 return -ENODEV;
1232 goto out;
1233 }
1234
1235 gspca_dev->users++;
1236 1227
1237 file->private_data = gspca_dev; 1228 file->private_data = gspca_dev;
1238#ifdef GSPCA_DEBUG 1229#ifdef GSPCA_DEBUG
@@ -1244,14 +1235,7 @@ static int dev_open(struct file *file)
1244 gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL 1235 gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL
1245 | V4L2_DEBUG_IOCTL_ARG); 1236 | V4L2_DEBUG_IOCTL_ARG);
1246#endif 1237#endif
1247 ret = 0; 1238 return 0;
1248out:
1249 mutex_unlock(&gspca_dev->queue_lock);
1250 if (ret != 0)
1251 PDEBUG(D_ERR|D_STREAM, "open failed err %d", ret);
1252 else
1253 PDEBUG(D_STREAM, "open done");
1254 return ret;
1255} 1239}
1256 1240
1257static int dev_close(struct file *file) 1241static int dev_close(struct file *file)
@@ -1261,7 +1245,6 @@ static int dev_close(struct file *file)
1261 PDEBUG(D_STREAM, "[%s] close", current->comm); 1245 PDEBUG(D_STREAM, "[%s] close", current->comm);
1262 if (mutex_lock_interruptible(&gspca_dev->queue_lock)) 1246 if (mutex_lock_interruptible(&gspca_dev->queue_lock))
1263 return -ERESTARTSYS; 1247 return -ERESTARTSYS;
1264 gspca_dev->users--;
1265 1248
1266 /* if the file did the capture, free the streaming resources */ 1249 /* if the file did the capture, free the streaming resources */
1267 if (gspca_dev->capt_file == file) { 1250 if (gspca_dev->capt_file == file) {
@@ -1272,8 +1255,6 @@ static int dev_close(struct file *file)
1272 mutex_unlock(&gspca_dev->usb_lock); 1255 mutex_unlock(&gspca_dev->usb_lock);
1273 } 1256 }
1274 frame_free(gspca_dev); 1257 frame_free(gspca_dev);
1275 gspca_dev->capt_file = NULL;
1276 gspca_dev->memory = GSPCA_MEMORY_NO;
1277 } 1258 }
1278 file->private_data = NULL; 1259 file->private_data = NULL;
1279 module_put(gspca_dev->module); 1260 module_put(gspca_dev->module);
@@ -1516,6 +1497,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1516 return -ERESTARTSYS; 1497 return -ERESTARTSYS;
1517 1498
1518 if (gspca_dev->memory != GSPCA_MEMORY_NO 1499 if (gspca_dev->memory != GSPCA_MEMORY_NO
1500 && gspca_dev->memory != GSPCA_MEMORY_READ
1519 && gspca_dev->memory != rb->memory) { 1501 && gspca_dev->memory != rb->memory) {
1520 ret = -EBUSY; 1502 ret = -EBUSY;
1521 goto out; 1503 goto out;
@@ -1544,19 +1526,18 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1544 gspca_stream_off(gspca_dev); 1526 gspca_stream_off(gspca_dev);
1545 mutex_unlock(&gspca_dev->usb_lock); 1527 mutex_unlock(&gspca_dev->usb_lock);
1546 } 1528 }
1529 /* Don't restart the stream when switching from read to mmap mode */
1530 if (gspca_dev->memory == GSPCA_MEMORY_READ)
1531 streaming = 0;
1547 1532
1548 /* free the previous allocated buffers, if any */ 1533 /* free the previous allocated buffers, if any */
1549 if (gspca_dev->nframes != 0) { 1534 if (gspca_dev->nframes != 0)
1550 frame_free(gspca_dev); 1535 frame_free(gspca_dev);
1551 gspca_dev->capt_file = NULL;
1552 }
1553 if (rb->count == 0) /* unrequest */ 1536 if (rb->count == 0) /* unrequest */
1554 goto out; 1537 goto out;
1555 gspca_dev->memory = rb->memory; 1538 ret = frame_alloc(gspca_dev, file, rb->memory, rb->count);
1556 ret = frame_alloc(gspca_dev, rb->count);
1557 if (ret == 0) { 1539 if (ret == 0) {
1558 rb->count = gspca_dev->nframes; 1540 rb->count = gspca_dev->nframes;
1559 gspca_dev->capt_file = file;
1560 if (streaming) 1541 if (streaming)
1561 ret = gspca_init_transfer(gspca_dev); 1542 ret = gspca_init_transfer(gspca_dev);
1562 } 1543 }
@@ -1630,11 +1611,15 @@ static int vidioc_streamoff(struct file *file, void *priv,
1630 1611
1631 if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 1612 if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1632 return -EINVAL; 1613 return -EINVAL;
1633 if (!gspca_dev->streaming) 1614
1634 return 0;
1635 if (mutex_lock_interruptible(&gspca_dev->queue_lock)) 1615 if (mutex_lock_interruptible(&gspca_dev->queue_lock))
1636 return -ERESTARTSYS; 1616 return -ERESTARTSYS;
1637 1617
1618 if (!gspca_dev->streaming) {
1619 ret = 0;
1620 goto out;
1621 }
1622
1638 /* check the capture file */ 1623 /* check the capture file */
1639 if (gspca_dev->capt_file != file) { 1624 if (gspca_dev->capt_file != file) {
1640 ret = -EBUSY; 1625 ret = -EBUSY;
@@ -1649,6 +1634,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
1649 gspca_dev->usb_err = 0; 1634 gspca_dev->usb_err = 0;
1650 gspca_stream_off(gspca_dev); 1635 gspca_stream_off(gspca_dev);
1651 mutex_unlock(&gspca_dev->usb_lock); 1636 mutex_unlock(&gspca_dev->usb_lock);
1637 /* In case another thread is waiting in dqbuf */
1638 wake_up_interruptible(&gspca_dev->wq);
1652 1639
1653 /* empty the transfer queues */ 1640 /* empty the transfer queues */
1654 atomic_set(&gspca_dev->fr_q, 0); 1641 atomic_set(&gspca_dev->fr_q, 0);
@@ -1827,33 +1814,77 @@ out:
1827 return ret; 1814 return ret;
1828} 1815}
1829 1816
1817static int frame_ready_nolock(struct gspca_dev *gspca_dev, struct file *file,
1818 enum v4l2_memory memory)
1819{
1820 if (!gspca_dev->present)
1821 return -ENODEV;
1822 if (gspca_dev->capt_file != file || gspca_dev->memory != memory ||
1823 !gspca_dev->streaming)
1824 return -EINVAL;
1825
1826 /* check if a frame is ready */
1827 return gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i);
1828}
1829
1830static int frame_ready(struct gspca_dev *gspca_dev, struct file *file,
1831 enum v4l2_memory memory)
1832{
1833 int ret;
1834
1835 if (mutex_lock_interruptible(&gspca_dev->queue_lock))
1836 return -ERESTARTSYS;
1837 ret = frame_ready_nolock(gspca_dev, file, memory);
1838 mutex_unlock(&gspca_dev->queue_lock);
1839 return ret;
1840}
1841
1830/* 1842/*
1831 * wait for a video frame 1843 * dequeue a video buffer
1832 * 1844 *
1833 * If a frame is ready, its index is returned. 1845 * If nonblock_ing is false, block until a buffer is available.
1834 */ 1846 */
1835static int frame_wait(struct gspca_dev *gspca_dev, 1847static int vidioc_dqbuf(struct file *file, void *priv,
1836 int nonblock_ing) 1848 struct v4l2_buffer *v4l2_buf)
1837{ 1849{
1838 int i, ret; 1850 struct gspca_dev *gspca_dev = priv;
1851 struct gspca_frame *frame;
1852 int i, j, ret;
1839 1853
1840 /* check if a frame is ready */ 1854 PDEBUG(D_FRAM, "dqbuf");
1841 i = gspca_dev->fr_o; 1855
1842 if (i == atomic_read(&gspca_dev->fr_i)) { 1856 if (mutex_lock_interruptible(&gspca_dev->queue_lock))
1843 if (nonblock_ing) 1857 return -ERESTARTSYS;
1858
1859 for (;;) {
1860 ret = frame_ready_nolock(gspca_dev, file, v4l2_buf->memory);
1861 if (ret < 0)
1862 goto out;
1863 if (ret > 0)
1864 break;
1865
1866 mutex_unlock(&gspca_dev->queue_lock);
1867
1868 if (file->f_flags & O_NONBLOCK)
1844 return -EAGAIN; 1869 return -EAGAIN;
1845 1870
1846 /* wait till a frame is ready */ 1871 /* wait till a frame is ready */
1847 ret = wait_event_interruptible_timeout(gspca_dev->wq, 1872 ret = wait_event_interruptible_timeout(gspca_dev->wq,
1848 i != atomic_read(&gspca_dev->fr_i) || 1873 frame_ready(gspca_dev, file, v4l2_buf->memory),
1849 !gspca_dev->streaming || !gspca_dev->present,
1850 msecs_to_jiffies(3000)); 1874 msecs_to_jiffies(3000));
1851 if (ret < 0) 1875 if (ret < 0)
1852 return ret; 1876 return ret;
1853 if (ret == 0 || !gspca_dev->streaming || !gspca_dev->present) 1877 if (ret == 0)
1854 return -EIO; 1878 return -EIO;
1879
1880 if (mutex_lock_interruptible(&gspca_dev->queue_lock))
1881 return -ERESTARTSYS;
1855 } 1882 }
1856 1883
1884 i = gspca_dev->fr_o;
1885 j = gspca_dev->fr_queue[i];
1886 frame = &gspca_dev->frame[j];
1887
1857 gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES; 1888 gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
1858 1889
1859 if (gspca_dev->sd_desc->dq_callback) { 1890 if (gspca_dev->sd_desc->dq_callback) {
@@ -1863,46 +1894,12 @@ static int frame_wait(struct gspca_dev *gspca_dev,
1863 gspca_dev->sd_desc->dq_callback(gspca_dev); 1894 gspca_dev->sd_desc->dq_callback(gspca_dev);
1864 mutex_unlock(&gspca_dev->usb_lock); 1895 mutex_unlock(&gspca_dev->usb_lock);
1865 } 1896 }
1866 return gspca_dev->fr_queue[i];
1867}
1868
1869/*
1870 * dequeue a video buffer
1871 *
1872 * If nonblock_ing is false, block until a buffer is available.
1873 */
1874static int vidioc_dqbuf(struct file *file, void *priv,
1875 struct v4l2_buffer *v4l2_buf)
1876{
1877 struct gspca_dev *gspca_dev = priv;
1878 struct gspca_frame *frame;
1879 int i, ret;
1880
1881 PDEBUG(D_FRAM, "dqbuf");
1882 if (v4l2_buf->memory != gspca_dev->memory)
1883 return -EINVAL;
1884
1885 if (!gspca_dev->present)
1886 return -ENODEV;
1887
1888 /* if not streaming, be sure the application will not loop forever */
1889 if (!(file->f_flags & O_NONBLOCK)
1890 && !gspca_dev->streaming && gspca_dev->users == 1)
1891 return -EINVAL;
1892 1897
1893 /* only the capturing file may dequeue */ 1898 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
1894 if (gspca_dev->capt_file != file) 1899 memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
1895 return -EINVAL; 1900 PDEBUG(D_FRAM, "dqbuf %d", j);
1896 1901 ret = 0;
1897 /* only one dequeue / read at a time */
1898 if (mutex_lock_interruptible(&gspca_dev->read_lock))
1899 return -ERESTARTSYS;
1900 1902
1901 ret = frame_wait(gspca_dev, file->f_flags & O_NONBLOCK);
1902 if (ret < 0)
1903 goto out;
1904 i = ret; /* frame index */
1905 frame = &gspca_dev->frame[i];
1906 if (gspca_dev->memory == V4L2_MEMORY_USERPTR) { 1903 if (gspca_dev->memory == V4L2_MEMORY_USERPTR) {
1907 if (copy_to_user((__u8 __user *) frame->v4l2_buf.m.userptr, 1904 if (copy_to_user((__u8 __user *) frame->v4l2_buf.m.userptr,
1908 frame->data, 1905 frame->data,
@@ -1910,15 +1907,10 @@ static int vidioc_dqbuf(struct file *file, void *priv,
1910 PDEBUG(D_ERR|D_STREAM, 1907 PDEBUG(D_ERR|D_STREAM,
1911 "dqbuf cp to user failed"); 1908 "dqbuf cp to user failed");
1912 ret = -EFAULT; 1909 ret = -EFAULT;
1913 goto out;
1914 } 1910 }
1915 } 1911 }
1916 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
1917 memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
1918 PDEBUG(D_FRAM, "dqbuf %d", i);
1919 ret = 0;
1920out: 1912out:
1921 mutex_unlock(&gspca_dev->read_lock); 1913 mutex_unlock(&gspca_dev->queue_lock);
1922 return ret; 1914 return ret;
1923} 1915}
1924 1916
@@ -2033,9 +2025,7 @@ static unsigned int dev_poll(struct file *file, poll_table *wait)
2033 poll_wait(file, &gspca_dev->wq, wait); 2025 poll_wait(file, &gspca_dev->wq, wait);
2034 2026
2035 /* if reqbufs is not done, the user would use read() */ 2027 /* if reqbufs is not done, the user would use read() */
2036 if (gspca_dev->nframes == 0) { 2028 if (gspca_dev->memory == GSPCA_MEMORY_NO) {
2037 if (gspca_dev->memory != GSPCA_MEMORY_NO)
2038 return POLLERR; /* not the 1st time */
2039 ret = read_alloc(gspca_dev, file); 2029 ret = read_alloc(gspca_dev, file);
2040 if (ret != 0) 2030 if (ret != 0)
2041 return POLLERR; 2031 return POLLERR;
@@ -2067,18 +2057,10 @@ static ssize_t dev_read(struct file *file, char __user *data,
2067 PDEBUG(D_FRAM, "read (%zd)", count); 2057 PDEBUG(D_FRAM, "read (%zd)", count);
2068 if (!gspca_dev->present) 2058 if (!gspca_dev->present)
2069 return -ENODEV; 2059 return -ENODEV;
2070 switch (gspca_dev->memory) { 2060 if (gspca_dev->memory == GSPCA_MEMORY_NO) { /* first time ? */
2071 case GSPCA_MEMORY_NO: /* first time */
2072 ret = read_alloc(gspca_dev, file); 2061 ret = read_alloc(gspca_dev, file);
2073 if (ret != 0) 2062 if (ret != 0)
2074 return ret; 2063 return ret;
2075 break;
2076 case GSPCA_MEMORY_READ:
2077 if (gspca_dev->capt_file == file)
2078 break;
2079 /* fall thru */
2080 default:
2081 return -EINVAL;
2082 } 2064 }
2083 2065
2084 /* get a frame */ 2066 /* get a frame */
@@ -2266,7 +2248,6 @@ int gspca_dev_probe2(struct usb_interface *intf,
2266 goto out; 2248 goto out;
2267 2249
2268 mutex_init(&gspca_dev->usb_lock); 2250 mutex_init(&gspca_dev->usb_lock);
2269 mutex_init(&gspca_dev->read_lock);
2270 mutex_init(&gspca_dev->queue_lock); 2251 mutex_init(&gspca_dev->queue_lock);
2271 init_waitqueue_head(&gspca_dev->wq); 2252 init_waitqueue_head(&gspca_dev->wq);
2272 2253
@@ -2341,12 +2322,11 @@ void gspca_disconnect(struct usb_interface *intf)
2341 PDEBUG(D_PROBE, "%s disconnect", 2322 PDEBUG(D_PROBE, "%s disconnect",
2342 video_device_node_name(&gspca_dev->vdev)); 2323 video_device_node_name(&gspca_dev->vdev));
2343 mutex_lock(&gspca_dev->usb_lock); 2324 mutex_lock(&gspca_dev->usb_lock);
2325
2344 gspca_dev->present = 0; 2326 gspca_dev->present = 0;
2327 wake_up_interruptible(&gspca_dev->wq);
2345 2328
2346 if (gspca_dev->streaming) { 2329 destroy_urbs(gspca_dev);
2347 destroy_urbs(gspca_dev);
2348 wake_up_interruptible(&gspca_dev->wq);
2349 }
2350 2330
2351#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) 2331#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
2352 gspca_input_destroy_urb(gspca_dev); 2332 gspca_input_destroy_urb(gspca_dev);
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 97b77a26a2eb..41755226d389 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -205,14 +205,12 @@ struct gspca_dev {
205 205
206 wait_queue_head_t wq; /* wait queue */ 206 wait_queue_head_t wq; /* wait queue */
207 struct mutex usb_lock; /* usb exchange protection */ 207 struct mutex usb_lock; /* usb exchange protection */
208 struct mutex read_lock; /* read protection */
209 struct mutex queue_lock; /* ISOC queue protection */ 208 struct mutex queue_lock; /* ISOC queue protection */
210 int usb_err; /* USB error - protected by usb_lock */ 209 int usb_err; /* USB error - protected by usb_lock */
211 u16 pkt_size; /* ISOC packet size */ 210 u16 pkt_size; /* ISOC packet size */
212#ifdef CONFIG_PM 211#ifdef CONFIG_PM
213 char frozen; /* suspend - resume */ 212 char frozen; /* suspend - resume */
214#endif 213#endif
215 char users; /* number of opens */
216 char present; /* device connected */ 214 char present; /* device connected */
217 char nbufread; /* number of buffers for read() */ 215 char nbufread; /* number of buffers for read() */
218 char memory; /* memory type (V4L2_MEMORY_xxx) */ 216 char memory; /* memory type (V4L2_MEMORY_xxx) */
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index a35e87bb0388..06b777f5379e 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -314,7 +314,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
314} 314}
315 315
316/* Table of supported USB devices */ 316/* Table of supported USB devices */
317static const __devinitdata struct usb_device_id device_table[] = { 317static const struct usb_device_id device_table[] = {
318 {USB_DEVICE(0x0979, 0x0280)}, 318 {USB_DEVICE(0x0979, 0x0280)},
319 {} 319 {}
320}; 320};
diff --git a/drivers/media/video/gspca/jpeg.h b/drivers/media/video/gspca/jpeg.h
index de63c36806c0..ab54910418b4 100644
--- a/drivers/media/video/gspca/jpeg.h
+++ b/drivers/media/video/gspca/jpeg.h
@@ -141,9 +141,9 @@ static void jpeg_define(u8 *jpeg_hdr,
141 memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head); 141 memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head);
142#ifndef CONEX_CAM 142#ifndef CONEX_CAM
143 jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8; 143 jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8;
144 jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height & 0xff; 144 jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height;
145 jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8; 145 jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8;
146 jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width & 0xff; 146 jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width;
147 jpeg_hdr[JPEG_HEIGHT_OFFSET + 6] = samplesY; 147 jpeg_hdr[JPEG_HEIGHT_OFFSET + 6] = samplesY;
148#endif 148#endif
149} 149}
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index d2ce65dcbfdc..5964691c0e95 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -607,7 +607,7 @@ static const struct sd_desc sd_desc = {
607}; 607};
608 608
609/* -- module initialisation -- */ 609/* -- module initialisation -- */
610static const __devinitdata struct usb_device_id device_table[] = { 610static const struct usb_device_id device_table[] = {
611 {USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */ 611 {USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */
612 {} 612 {}
613}; 613};
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index c872b93a3351..a7722b1aef9b 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -28,7 +28,7 @@ int force_sensor;
28static int dump_bridge; 28static int dump_bridge;
29int dump_sensor; 29int dump_sensor;
30 30
31static const __devinitdata struct usb_device_id m5602_table[] = { 31static const struct usb_device_id m5602_table[] = {
32 {USB_DEVICE(0x0402, 0x5602)}, 32 {USB_DEVICE(0x0402, 0x5602)},
33 {} 33 {}
34}; 34};
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index a81536e78698..cb4d0bf0d784 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -490,7 +490,7 @@ static const struct sd_desc sd_desc = {
490}; 490};
491 491
492/* -- module initialisation -- */ 492/* -- module initialisation -- */
493static const __devinitdata struct usb_device_id device_table[] = { 493static const struct usb_device_id device_table[] = {
494 {USB_DEVICE(0x093a, 0x050f)}, 494 {USB_DEVICE(0x093a, 0x050f)},
495 {} 495 {}
496}; 496};
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 7607a288b51c..3884c9d300c5 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -1229,7 +1229,7 @@ static const struct sd_desc sd_desc = {
1229}; 1229};
1230 1230
1231/* -- module initialisation -- */ 1231/* -- module initialisation -- */
1232static const __devinitdata struct usb_device_id device_table[] = { 1232static const struct usb_device_id device_table[] = {
1233 {USB_DEVICE(0x08ca, 0x0110)}, /* Trust Spyc@m 100 */ 1233 {USB_DEVICE(0x08ca, 0x0110)}, /* Trust Spyc@m 100 */
1234 {USB_DEVICE(0x08ca, 0x0111)}, /* Aiptek Pencam VGA+ */ 1234 {USB_DEVICE(0x08ca, 0x0111)}, /* Aiptek Pencam VGA+ */
1235 {USB_DEVICE(0x093a, 0x010f)}, /* All other known MR97310A VGA cams */ 1235 {USB_DEVICE(0x093a, 0x010f)}, /* All other known MR97310A VGA cams */
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index e1c3b9328ace..8ab2c452c25e 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -488,7 +488,6 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
488#define R511_SNAP_PXDIV 0x1c 488#define R511_SNAP_PXDIV 0x1c
489#define R511_SNAP_LNDIV 0x1d 489#define R511_SNAP_LNDIV 0x1d
490#define R511_SNAP_UV_EN 0x1e 490#define R511_SNAP_UV_EN 0x1e
491#define R511_SNAP_UV_EN 0x1e
492#define R511_SNAP_OPTS 0x1f 491#define R511_SNAP_OPTS 0x1f
493 492
494#define R511_DRAM_FLOW_CTL 0x20 493#define R511_DRAM_FLOW_CTL 0x20
@@ -1847,8 +1846,7 @@ static const struct ov_i2c_regvals norm_7670[] = {
1847 { 0x6c, 0x0a }, 1846 { 0x6c, 0x0a },
1848 { 0x6d, 0x55 }, 1847 { 0x6d, 0x55 },
1849 { 0x6e, 0x11 }, 1848 { 0x6e, 0x11 },
1850 { 0x6f, 0x9f }, 1849 { 0x6f, 0x9f }, /* "9e for advance AWB" */
1851 /* "9e for advance AWB" */
1852 { 0x6a, 0x40 }, 1850 { 0x6a, 0x40 },
1853 { OV7670_R01_BLUE, 0x40 }, 1851 { OV7670_R01_BLUE, 0x40 },
1854 { OV7670_R02_RED, 0x60 }, 1852 { OV7670_R02_RED, 0x60 },
@@ -3054,7 +3052,7 @@ static void ov519_configure(struct sd *sd)
3054{ 3052{
3055 static const struct ov_regvals init_519[] = { 3053 static const struct ov_regvals init_519[] = {
3056 { 0x5a, 0x6d }, /* EnableSystem */ 3054 { 0x5a, 0x6d }, /* EnableSystem */
3057 { 0x53, 0x9b }, 3055 { 0x53, 0x9b }, /* don't enable the microcontroller */
3058 { OV519_R54_EN_CLK1, 0xff }, /* set bit2 to enable jpeg */ 3056 { OV519_R54_EN_CLK1, 0xff }, /* set bit2 to enable jpeg */
3059 { 0x5d, 0x03 }, 3057 { 0x5d, 0x03 },
3060 { 0x49, 0x01 }, 3058 { 0x49, 0x01 },
@@ -4747,7 +4745,7 @@ static const struct sd_desc sd_desc = {
4747}; 4745};
4748 4746
4749/* -- module initialisation -- */ 4747/* -- module initialisation -- */
4750static const __devinitdata struct usb_device_id device_table[] = { 4748static const struct usb_device_id device_table[] = {
4751 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, 4749 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
4752 {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 }, 4750 {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
4753 {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, 4751 {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 0edf93973b1c..04da22802736 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -479,15 +479,20 @@ static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val)
479 struct usb_device *udev = gspca_dev->dev; 479 struct usb_device *udev = gspca_dev->dev;
480 int ret; 480 int ret;
481 481
482 PDEBUG(D_USBO, "reg=0x%04x, val=0%02x", reg, val); 482 if (gspca_dev->usb_err < 0)
483 return;
484
485 PDEBUG(D_USBO, "SET 01 0000 %04x %02x", reg, val);
483 gspca_dev->usb_buf[0] = val; 486 gspca_dev->usb_buf[0] = val;
484 ret = usb_control_msg(udev, 487 ret = usb_control_msg(udev,
485 usb_sndctrlpipe(udev, 0), 488 usb_sndctrlpipe(udev, 0),
486 0x01, 489 0x01,
487 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 490 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
488 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT); 491 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
489 if (ret < 0) 492 if (ret < 0) {
490 err("write failed %d", ret); 493 err("write failed %d", ret);
494 gspca_dev->usb_err = ret;
495 }
491} 496}
492 497
493static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg) 498static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -495,14 +500,18 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
495 struct usb_device *udev = gspca_dev->dev; 500 struct usb_device *udev = gspca_dev->dev;
496 int ret; 501 int ret;
497 502
503 if (gspca_dev->usb_err < 0)
504 return 0;
498 ret = usb_control_msg(udev, 505 ret = usb_control_msg(udev,
499 usb_rcvctrlpipe(udev, 0), 506 usb_rcvctrlpipe(udev, 0),
500 0x01, 507 0x01,
501 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 508 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
502 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT); 509 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
503 PDEBUG(D_USBI, "reg=0x%04x, data=0x%02x", reg, gspca_dev->usb_buf[0]); 510 PDEBUG(D_USBI, "GET 01 0000 %04x %02x", reg, gspca_dev->usb_buf[0]);
504 if (ret < 0) 511 if (ret < 0) {
505 err("read failed %d", ret); 512 err("read failed %d", ret);
513 gspca_dev->usb_err = ret;
514 }
506 return gspca_dev->usb_buf[0]; 515 return gspca_dev->usb_buf[0];
507} 516}
508 517
@@ -558,13 +567,15 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
558 567
559static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val) 568static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
560{ 569{
561 PDEBUG(D_USBO, "reg: 0x%02x, val: 0x%02x", reg, val); 570 PDEBUG(D_USBO, "sccb write: %02x %02x", reg, val);
562 ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg); 571 ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
563 ov534_reg_write(gspca_dev, OV534_REG_WRITE, val); 572 ov534_reg_write(gspca_dev, OV534_REG_WRITE, val);
564 ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3); 573 ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
565 574
566 if (!sccb_check_status(gspca_dev)) 575 if (!sccb_check_status(gspca_dev)) {
567 err("sccb_reg_write failed"); 576 err("sccb_reg_write failed");
577 gspca_dev->usb_err = -EIO;
578 }
568} 579}
569 580
570static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg) 581static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -885,7 +896,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
885 ov534_set_led(gspca_dev, 0); 896 ov534_set_led(gspca_dev, 0);
886 set_frame_rate(gspca_dev); 897 set_frame_rate(gspca_dev);
887 898
888 return 0; 899 return gspca_dev->usb_err;
889} 900}
890 901
891static int sd_start(struct gspca_dev *gspca_dev) 902static int sd_start(struct gspca_dev *gspca_dev)
@@ -920,7 +931,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
920 931
921 ov534_set_led(gspca_dev, 1); 932 ov534_set_led(gspca_dev, 1);
922 ov534_reg_write(gspca_dev, 0xe0, 0x00); 933 ov534_reg_write(gspca_dev, 0xe0, 0x00);
923 return 0; 934 return gspca_dev->usb_err;
924} 935}
925 936
926static void sd_stopN(struct gspca_dev *gspca_dev) 937static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -1289,7 +1300,7 @@ static const struct sd_desc sd_desc = {
1289}; 1300};
1290 1301
1291/* -- module initialisation -- */ 1302/* -- module initialisation -- */
1292static const __devinitdata struct usb_device_id device_table[] = { 1303static const struct usb_device_id device_table[] = {
1293 {USB_DEVICE(0x1415, 0x2000)}, 1304 {USB_DEVICE(0x1415, 0x2000)},
1294 {} 1305 {}
1295}; 1306};
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index c5244b4b4777..aaf5428c57f5 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1429,7 +1429,7 @@ static const struct sd_desc sd_desc = {
1429}; 1429};
1430 1430
1431/* -- module initialisation -- */ 1431/* -- module initialisation -- */
1432static const __devinitdata struct usb_device_id device_table[] = { 1432static const struct usb_device_id device_table[] = {
1433 {USB_DEVICE(0x06f8, 0x3003)}, 1433 {USB_DEVICE(0x06f8, 0x3003)},
1434 {} 1434 {}
1435}; 1435};
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 96f9986305b4..81739a2f205e 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -530,7 +530,7 @@ static const struct sd_desc sd_desc = {
530}; 530};
531 531
532/* -- module initialisation -- */ 532/* -- module initialisation -- */
533static const __devinitdata struct usb_device_id device_table[] = { 533static const struct usb_device_id device_table[] = {
534 {USB_DEVICE(0x041e, 0x4028)}, 534 {USB_DEVICE(0x041e, 0x4028)},
535 {USB_DEVICE(0x093a, 0x2460)}, 535 {USB_DEVICE(0x093a, 0x2460)},
536 {USB_DEVICE(0x093a, 0x2461)}, 536 {USB_DEVICE(0x093a, 0x2461)},
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 2700975abce5..5615d7bd8304 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1184,7 +1184,7 @@ static const struct sd_desc sd_desc = {
1184}; 1184};
1185 1185
1186/* -- module initialisation -- */ 1186/* -- module initialisation -- */
1187static const struct usb_device_id device_table[] __devinitconst = { 1187static const struct usb_device_id device_table[] = {
1188 {USB_DEVICE(0x06f8, 0x3009)}, 1188 {USB_DEVICE(0x06f8, 0x3009)},
1189 {USB_DEVICE(0x093a, 0x2620)}, 1189 {USB_DEVICE(0x093a, 0x2620)},
1190 {USB_DEVICE(0x093a, 0x2621)}, 1190 {USB_DEVICE(0x093a, 0x2621)},
@@ -1201,7 +1201,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
1201MODULE_DEVICE_TABLE(usb, device_table); 1201MODULE_DEVICE_TABLE(usb, device_table);
1202 1202
1203/* -- device connect -- */ 1203/* -- device connect -- */
1204static int __devinit sd_probe(struct usb_interface *intf, 1204static int sd_probe(struct usb_interface *intf,
1205 const struct usb_device_id *id) 1205 const struct usb_device_id *id)
1206{ 1206{
1207 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 1207 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 6820f5d58b19..f8801b50e64f 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -837,7 +837,7 @@ static const struct sd_desc sd_desc = {
837}; 837};
838 838
839/* -- module initialisation -- */ 839/* -- module initialisation -- */
840static const struct usb_device_id device_table[] __devinitconst = { 840static const struct usb_device_id device_table[] = {
841 {USB_DEVICE(0x093a, 0x2600)}, 841 {USB_DEVICE(0x093a, 0x2600)},
842 {USB_DEVICE(0x093a, 0x2601)}, 842 {USB_DEVICE(0x093a, 0x2601)},
843 {USB_DEVICE(0x093a, 0x2603)}, 843 {USB_DEVICE(0x093a, 0x2603)},
@@ -849,7 +849,7 @@ static const struct usb_device_id device_table[] __devinitconst = {
849MODULE_DEVICE_TABLE(usb, device_table); 849MODULE_DEVICE_TABLE(usb, device_table);
850 850
851/* -- device connect -- */ 851/* -- device connect -- */
852static int __devinit sd_probe(struct usb_interface *intf, 852static int sd_probe(struct usb_interface *intf,
853 const struct usb_device_id *id) 853 const struct usb_device_id *id)
854{ 854{
855 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 855 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 40a06680502d..4271f86dfe01 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -703,7 +703,7 @@ static const struct sd_desc sd_desc = {
703}; 703};
704 704
705/* -- module initialisation -- */ 705/* -- module initialisation -- */
706static const __devinitdata struct usb_device_id device_table[] = { 706static const struct usb_device_id device_table[] = {
707 {USB_DEVICE(0x0458, 0x7005)}, /* Genius Smart 300, version 2 */ 707 {USB_DEVICE(0x0458, 0x7005)}, /* Genius Smart 300, version 2 */
708 /* The Genius Smart is untested. I can't find an owner ! */ 708 /* The Genius Smart is untested. I can't find an owner ! */
709 /* {USB_DEVICE(0x0c45, 0x8000)}, DC31VC, Don't know this camera */ 709 /* {USB_DEVICE(0x0c45, 0x8000)}, DC31VC, Don't know this camera */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index cb08d00d0a31..fcf29897b713 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2470,7 +2470,7 @@ static const struct sd_desc sd_desc = {
2470 | (SENSOR_ ## sensor << 8) \ 2470 | (SENSOR_ ## sensor << 8) \
2471 | (i2c_addr) 2471 | (i2c_addr)
2472 2472
2473static const __devinitdata struct usb_device_id device_table[] = { 2473static const struct usb_device_id device_table[] = {
2474 {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)}, 2474 {USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
2475 {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)}, 2475 {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
2476 {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)}, 2476 {USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 73504a3f87b7..c6cd68d66b53 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -23,8 +23,15 @@
23/* Some documentation on known sonixb registers: 23/* Some documentation on known sonixb registers:
24 24
25Reg Use 25Reg Use
26sn9c101 / sn9c102:
260x10 high nibble red gain low nibble blue gain 270x10 high nibble red gain low nibble blue gain
270x11 low nibble green gain 280x11 low nibble green gain
29sn9c103:
300x05 red gain 0-127
310x06 blue gain 0-127
320x07 green gain 0-127
33all:
340x08-0x0f i2c / 3wire registers
280x12 hstart 350x12 hstart
290x13 vstart 360x13 vstart
300x15 hsize (hsize = register-value * 16) 370x15 hsize (hsize = register-value * 16)
@@ -88,12 +95,9 @@ struct sd {
88typedef const __u8 sensor_init_t[8]; 95typedef const __u8 sensor_init_t[8];
89 96
90struct sensor_data { 97struct sensor_data {
91 const __u8 *bridge_init[2]; 98 const __u8 *bridge_init;
92 int bridge_init_size[2];
93 sensor_init_t *sensor_init; 99 sensor_init_t *sensor_init;
94 int sensor_init_size; 100 int sensor_init_size;
95 sensor_init_t *sensor_bridge_init[2];
96 int sensor_bridge_init_size[2];
97 int flags; 101 int flags;
98 unsigned ctrl_dis; 102 unsigned ctrl_dis;
99 __u8 sensor_addr; 103 __u8 sensor_addr;
@@ -114,7 +118,6 @@ struct sensor_data {
114#define NO_FREQ (1 << FREQ_IDX) 118#define NO_FREQ (1 << FREQ_IDX)
115#define NO_BRIGHTNESS (1 << BRIGHTNESS_IDX) 119#define NO_BRIGHTNESS (1 << BRIGHTNESS_IDX)
116 120
117#define COMP2 0x8f
118#define COMP 0xc7 /* 0x87 //0x07 */ 121#define COMP 0xc7 /* 0x87 //0x07 */
119#define COMP1 0xc9 /* 0x89 //0x09 */ 122#define COMP1 0xc9 /* 0x89 //0x09 */
120 123
@@ -123,15 +126,11 @@ struct sensor_data {
123 126
124#define SYS_CLK 0x04 127#define SYS_CLK 0x04
125 128
126#define SENS(bridge_1, bridge_3, sensor, sensor_1, \ 129#define SENS(bridge, sensor, _flags, _ctrl_dis, _sensor_addr) \
127 sensor_3, _flags, _ctrl_dis, _sensor_addr) \
128{ \ 130{ \
129 .bridge_init = { bridge_1, bridge_3 }, \ 131 .bridge_init = bridge, \
130 .bridge_init_size = { sizeof(bridge_1), sizeof(bridge_3) }, \
131 .sensor_init = sensor, \ 132 .sensor_init = sensor, \
132 .sensor_init_size = sizeof(sensor), \ 133 .sensor_init_size = sizeof(sensor), \
133 .sensor_bridge_init = { sensor_1, sensor_3,}, \
134 .sensor_bridge_init_size = { sizeof(sensor_1), sizeof(sensor_3)}, \
135 .flags = _flags, .ctrl_dis = _ctrl_dis, .sensor_addr = _sensor_addr \ 134 .flags = _flags, .ctrl_dis = _ctrl_dis, .sensor_addr = _sensor_addr \
136} 135}
137 136
@@ -311,7 +310,6 @@ static const __u8 initHv7131d[] = {
311 0x00, 0x00, 310 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x02, 0x02, 0x00, 311 0x00, 0x00, 0x00, 0x02, 0x02, 0x00,
313 0x28, 0x1e, 0x60, 0x8e, 0x42, 312 0x28, 0x1e, 0x60, 0x8e, 0x42,
314 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
315}; 313};
316static const __u8 hv7131d_sensor_init[][8] = { 314static const __u8 hv7131d_sensor_init[][8] = {
317 {0xa0, 0x11, 0x01, 0x04, 0x00, 0x00, 0x00, 0x17}, 315 {0xa0, 0x11, 0x01, 0x04, 0x00, 0x00, 0x00, 0x17},
@@ -326,7 +324,6 @@ static const __u8 initHv7131r[] = {
326 0x00, 0x00, 324 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 325 0x00, 0x00, 0x00, 0x02, 0x01, 0x00,
328 0x28, 0x1e, 0x60, 0x8a, 0x20, 326 0x28, 0x1e, 0x60, 0x8a, 0x20,
329 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
330}; 327};
331static const __u8 hv7131r_sensor_init[][8] = { 328static const __u8 hv7131r_sensor_init[][8] = {
332 {0xc0, 0x11, 0x31, 0x38, 0x2a, 0x2e, 0x00, 0x10}, 329 {0xc0, 0x11, 0x31, 0x38, 0x2a, 0x2e, 0x00, 0x10},
@@ -339,7 +336,7 @@ static const __u8 initOv6650[] = {
339 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 336 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
340 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 337 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
341 0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b, 338 0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b,
342 0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07 339 0x10,
343}; 340};
344static const __u8 ov6650_sensor_init[][8] = { 341static const __u8 ov6650_sensor_init[][8] = {
345 /* Bright, contrast, etc are set through SCBB interface. 342 /* Bright, contrast, etc are set through SCBB interface.
@@ -378,24 +375,13 @@ static const __u8 initOv7630[] = {
378 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */ 375 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */
379 0x00, 0x01, 0x01, 0x0a, /* r11 .. r14 */ 376 0x00, 0x01, 0x01, 0x0a, /* r11 .. r14 */
380 0x28, 0x1e, /* H & V sizes r15 .. r16 */ 377 0x28, 0x1e, /* H & V sizes r15 .. r16 */
381 0x68, COMP2, MCK_INIT1, /* r17 .. r19 */
382 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c /* r1a .. r1f */
383};
384static const __u8 initOv7630_3[] = {
385 0x44, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0x80, /* r01 .. r08 */
386 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */
387 0x00, 0x02, 0x01, 0x0a, /* r11 .. r14 */
388 0x28, 0x1e, /* H & V sizes r15 .. r16 */
389 0x68, 0x8f, MCK_INIT1, /* r17 .. r19 */ 378 0x68, 0x8f, MCK_INIT1, /* r17 .. r19 */
390 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c, 0x00, /* r1a .. r20 */
391 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80, /* r21 .. r28 */
392 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xff /* r29 .. r30 */
393}; 379};
394static const __u8 ov7630_sensor_init[][8] = { 380static const __u8 ov7630_sensor_init[][8] = {
395 {0xa0, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, 381 {0xa0, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10},
396 {0xb0, 0x21, 0x01, 0x77, 0x3a, 0x00, 0x00, 0x10}, 382 {0xb0, 0x21, 0x01, 0x77, 0x3a, 0x00, 0x00, 0x10},
397/* {0xd0, 0x21, 0x12, 0x7c, 0x01, 0x80, 0x34, 0x10}, jfm */ 383/* {0xd0, 0x21, 0x12, 0x7c, 0x01, 0x80, 0x34, 0x10}, jfm */
398 {0xd0, 0x21, 0x12, 0x1c, 0x00, 0x80, 0x34, 0x10}, /* jfm */ 384 {0xd0, 0x21, 0x12, 0x5c, 0x00, 0x80, 0x34, 0x10}, /* jfm */
399 {0xa0, 0x21, 0x1b, 0x04, 0x00, 0x80, 0x34, 0x10}, 385 {0xa0, 0x21, 0x1b, 0x04, 0x00, 0x80, 0x34, 0x10},
400 {0xa0, 0x21, 0x20, 0x44, 0x00, 0x80, 0x34, 0x10}, 386 {0xa0, 0x21, 0x20, 0x44, 0x00, 0x80, 0x34, 0x10},
401 {0xa0, 0x21, 0x23, 0xee, 0x00, 0x80, 0x34, 0x10}, 387 {0xa0, 0x21, 0x23, 0xee, 0x00, 0x80, 0x34, 0x10},
@@ -413,16 +399,11 @@ static const __u8 ov7630_sensor_init[][8] = {
413 {0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10}, 399 {0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10},
414}; 400};
415 401
416static const __u8 ov7630_sensor_init_3[][8] = {
417 {0xa0, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
418};
419
420static const __u8 initPas106[] = { 402static const __u8 initPas106[] = {
421 0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x40, 0x00, 0x00, 0x00, 403 0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x40, 0x00, 0x00, 0x00,
422 0x00, 0x00, 404 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x04, 0x01, 0x00, 405 0x00, 0x00, 0x00, 0x04, 0x01, 0x00,
424 0x16, 0x12, 0x24, COMP1, MCK_INIT1, 406 0x16, 0x12, 0x24, COMP1, MCK_INIT1,
425 0x18, 0x10, 0x02, 0x02, 0x09, 0x07
426}; 407};
427/* compression 0x86 mckinit1 0x2b */ 408/* compression 0x86 mckinit1 0x2b */
428 409
@@ -496,7 +477,6 @@ static const __u8 initPas202[] = {
496 0x00, 0x00, 477 0x00, 0x00,
497 0x00, 0x00, 0x00, 0x06, 0x03, 0x0a, 478 0x00, 0x00, 0x00, 0x06, 0x03, 0x0a,
498 0x28, 0x1e, 0x20, 0x89, 0x20, 479 0x28, 0x1e, 0x20, 0x89, 0x20,
499 0x00, 0x00, 0x02, 0x03, 0x0f, 0x0c
500}; 480};
501 481
502/* "Known" PAS202BCB registers: 482/* "Known" PAS202BCB registers:
@@ -537,7 +517,6 @@ static const __u8 initTas5110c[] = {
537 0x00, 0x00, 517 0x00, 0x00,
538 0x00, 0x00, 0x00, 0x45, 0x09, 0x0a, 518 0x00, 0x00, 0x00, 0x45, 0x09, 0x0a,
539 0x16, 0x12, 0x60, 0x86, 0x2b, 519 0x16, 0x12, 0x60, 0x86, 0x2b,
540 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
541}; 520};
542/* Same as above, except a different hstart */ 521/* Same as above, except a different hstart */
543static const __u8 initTas5110d[] = { 522static const __u8 initTas5110d[] = {
@@ -545,12 +524,19 @@ static const __u8 initTas5110d[] = {
545 0x00, 0x00, 524 0x00, 0x00,
546 0x00, 0x00, 0x00, 0x41, 0x09, 0x0a, 525 0x00, 0x00, 0x00, 0x41, 0x09, 0x0a,
547 0x16, 0x12, 0x60, 0x86, 0x2b, 526 0x16, 0x12, 0x60, 0x86, 0x2b,
548 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
549}; 527};
550static const __u8 tas5110_sensor_init[][8] = { 528/* tas5110c is 3 wire, tas5110d is 2 wire (regular i2c) */
529static const __u8 tas5110c_sensor_init[][8] = {
551 {0x30, 0x11, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x10}, 530 {0x30, 0x11, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x10},
552 {0x30, 0x11, 0x02, 0x20, 0xa9, 0x00, 0x00, 0x10}, 531 {0x30, 0x11, 0x02, 0x20, 0xa9, 0x00, 0x00, 0x10},
553 {0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17}, 532};
533/* Known TAS5110D registers
534 * reg02: gain, bit order reversed!! 0 == max gain, 255 == min gain
535 * reg03: bit3: vflip, bit4: ~hflip, bit7: ~gainboost (~ == inverted)
536 * Note: writing reg03 seems to only work when written together with 02
537 */
538static const __u8 tas5110d_sensor_init[][8] = {
539 {0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17}, /* reset */
554}; 540};
555 541
556static const __u8 initTas5130[] = { 542static const __u8 initTas5130[] = {
@@ -558,7 +544,6 @@ static const __u8 initTas5130[] = {
558 0x00, 0x00, 544 0x00, 0x00,
559 0x00, 0x00, 0x00, 0x68, 0x0c, 0x0a, 545 0x00, 0x00, 0x00, 0x68, 0x0c, 0x0a,
560 0x28, 0x1e, 0x60, COMP, MCK_INIT, 546 0x28, 0x1e, 0x60, COMP, MCK_INIT,
561 0x18, 0x10, 0x04, 0x03, 0x11, 0x0c
562}; 547};
563static const __u8 tas5130_sensor_init[][8] = { 548static const __u8 tas5130_sensor_init[][8] = {
564/* {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10}, 549/* {0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
@@ -569,21 +554,18 @@ static const __u8 tas5130_sensor_init[][8] = {
569}; 554};
570 555
571static struct sensor_data sensor_data[] = { 556static struct sensor_data sensor_data[] = {
572SENS(initHv7131d, NULL, hv7131d_sensor_init, NULL, NULL, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0), 557SENS(initHv7131d, hv7131d_sensor_init, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
573SENS(initHv7131r, NULL, hv7131r_sensor_init, NULL, NULL, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0), 558SENS(initHv7131r, hv7131r_sensor_init, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
574SENS(initOv6650, NULL, ov6650_sensor_init, NULL, NULL, F_GAIN|F_SIF, 0, 0x60), 559SENS(initOv6650, ov6650_sensor_init, F_GAIN|F_SIF, 0, 0x60),
575SENS(initOv7630, initOv7630_3, ov7630_sensor_init, NULL, ov7630_sensor_init_3, 560SENS(initOv7630, ov7630_sensor_init, F_GAIN, 0, 0x21),
576 F_GAIN, 0, 0x21), 561SENS(initPas106, pas106_sensor_init, F_GAIN|F_SIF, NO_FREQ, 0),
577SENS(initPas106, NULL, pas106_sensor_init, NULL, NULL, F_GAIN|F_SIF, NO_FREQ, 562SENS(initPas202, pas202_sensor_init, F_GAIN, NO_FREQ, 0),
578 0), 563SENS(initTas5110c, tas5110c_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
579SENS(initPas202, initPas202, pas202_sensor_init, NULL, NULL, F_GAIN, 564 NO_BRIGHTNESS|NO_FREQ, 0),
580 NO_FREQ, 0), 565SENS(initTas5110d, tas5110d_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
581SENS(initTas5110c, NULL, tas5110_sensor_init, NULL, NULL, 566 NO_BRIGHTNESS|NO_FREQ, 0),
582 F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0), 567SENS(initTas5130, tas5130_sensor_init, F_GAIN,
583SENS(initTas5110d, NULL, tas5110_sensor_init, NULL, NULL, 568 NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
584 F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
585SENS(initTas5130, NULL, tas5130_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ,
586 0),
587}; 569};
588 570
589/* get one byte in gspca_dev->usb_buf */ 571/* get one byte in gspca_dev->usb_buf */
@@ -655,7 +637,6 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,
655static void setbrightness(struct gspca_dev *gspca_dev) 637static void setbrightness(struct gspca_dev *gspca_dev)
656{ 638{
657 struct sd *sd = (struct sd *) gspca_dev; 639 struct sd *sd = (struct sd *) gspca_dev;
658 __u8 value;
659 640
660 switch (sd->sensor) { 641 switch (sd->sensor) {
661 case SENSOR_OV6650: 642 case SENSOR_OV6650:
@@ -697,17 +678,6 @@ static void setbrightness(struct gspca_dev *gspca_dev)
697 goto err; 678 goto err;
698 break; 679 break;
699 } 680 }
700 case SENSOR_TAS5130CXX: {
701 __u8 i2c[] =
702 {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
703
704 value = 0xff - sd->brightness;
705 i2c[4] = value;
706 PDEBUG(D_CONF, "brightness %d : %d", value, i2c[4]);
707 if (i2c_w(gspca_dev, i2c) < 0)
708 goto err;
709 break;
710 }
711 } 681 }
712 return; 682 return;
713err: 683err:
@@ -733,7 +703,7 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
733 break; 703 break;
734 } 704 }
735 case SENSOR_TAS5110C: 705 case SENSOR_TAS5110C:
736 case SENSOR_TAS5110D: { 706 case SENSOR_TAS5130CXX: {
737 __u8 i2c[] = 707 __u8 i2c[] =
738 {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10}; 708 {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
739 709
@@ -742,6 +712,23 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
742 goto err; 712 goto err;
743 break; 713 break;
744 } 714 }
715 case SENSOR_TAS5110D: {
716 __u8 i2c[] = {
717 0xb0, 0x61, 0x02, 0x00, 0x10, 0x00, 0x00, 0x17 };
718 gain = 255 - gain;
719 /* The bits in the register are the wrong way around!! */
720 i2c[3] |= (gain & 0x80) >> 7;
721 i2c[3] |= (gain & 0x40) >> 5;
722 i2c[3] |= (gain & 0x20) >> 3;
723 i2c[3] |= (gain & 0x10) >> 1;
724 i2c[3] |= (gain & 0x08) << 1;
725 i2c[3] |= (gain & 0x04) << 3;
726 i2c[3] |= (gain & 0x02) << 5;
727 i2c[3] |= (gain & 0x01) << 7;
728 if (i2c_w(gspca_dev, i2c) < 0)
729 goto err;
730 break;
731 }
745 732
746 case SENSOR_OV6650: 733 case SENSOR_OV6650:
747 gain >>= 1; 734 gain >>= 1;
@@ -796,7 +783,7 @@ static void setgain(struct gspca_dev *gspca_dev)
796{ 783{
797 struct sd *sd = (struct sd *) gspca_dev; 784 struct sd *sd = (struct sd *) gspca_dev;
798 __u8 gain; 785 __u8 gain;
799 __u8 buf[2] = { 0, 0 }; 786 __u8 buf[3] = { 0, 0, 0 };
800 787
801 if (sensor_data[sd->sensor].flags & F_GAIN) { 788 if (sensor_data[sd->sensor].flags & F_GAIN) {
802 /* Use the sensor gain to do the actual gain */ 789 /* Use the sensor gain to do the actual gain */
@@ -804,13 +791,18 @@ static void setgain(struct gspca_dev *gspca_dev)
804 return; 791 return;
805 } 792 }
806 793
807 gain = sd->gain >> 4; 794 if (sd->bridge == BRIDGE_103) {
808 795 gain = sd->gain >> 1;
809 /* red and blue gain */ 796 buf[0] = gain; /* Red */
810 buf[0] = gain << 4 | gain; 797 buf[1] = gain; /* Green */
811 /* green gain */ 798 buf[2] = gain; /* Blue */
812 buf[1] = gain; 799 reg_w(gspca_dev, 0x05, buf, 3);
813 reg_w(gspca_dev, 0x10, buf, 2); 800 } else {
801 gain = sd->gain >> 4;
802 buf[0] = gain << 4 | gain; /* Red and blue */
803 buf[1] = gain; /* Green */
804 reg_w(gspca_dev, 0x10, buf, 2);
805 }
814} 806}
815 807
816static void setexposure(struct gspca_dev *gspca_dev) 808static void setexposure(struct gspca_dev *gspca_dev)
@@ -1049,7 +1041,7 @@ static void do_autogain(struct gspca_dev *gspca_dev)
1049 desired_avg_lum = 5000; 1041 desired_avg_lum = 5000;
1050 } else { 1042 } else {
1051 deadzone = 1500; 1043 deadzone = 1500;
1052 desired_avg_lum = 18000; 1044 desired_avg_lum = 13000;
1053 } 1045 }
1054 1046
1055 if (sensor_data[sd->sensor].flags & F_COARSE_EXPO) 1047 if (sensor_data[sd->sensor].flags & F_COARSE_EXPO)
@@ -1127,53 +1119,91 @@ static int sd_start(struct gspca_dev *gspca_dev)
1127{ 1119{
1128 struct sd *sd = (struct sd *) gspca_dev; 1120 struct sd *sd = (struct sd *) gspca_dev;
1129 struct cam *cam = &gspca_dev->cam; 1121 struct cam *cam = &gspca_dev->cam;
1130 int mode, l; 1122 int i, mode;
1131 const __u8 *sn9c10x; 1123 __u8 regs[0x31];
1132 __u8 reg12_19[8];
1133 1124
1134 mode = cam->cam_mode[gspca_dev->curr_mode].priv & 0x07; 1125 mode = cam->cam_mode[gspca_dev->curr_mode].priv & 0x07;
1135 sn9c10x = sensor_data[sd->sensor].bridge_init[sd->bridge]; 1126 /* Copy registers 0x01 - 0x19 from the template */
1136 l = sensor_data[sd->sensor].bridge_init_size[sd->bridge]; 1127 memcpy(&regs[0x01], sensor_data[sd->sensor].bridge_init, 0x19);
1137 memcpy(reg12_19, &sn9c10x[0x12 - 1], 8); 1128 /* Set the mode */
1138 reg12_19[6] = sn9c10x[0x18 - 1] | (mode << 4); 1129 regs[0x18] |= mode << 4;
1139 /* Special cases where reg 17 and or 19 value depends on mode */ 1130
1131 /* Set bridge gain to 1.0 */
1132 if (sd->bridge == BRIDGE_103) {
1133 regs[0x05] = 0x20; /* Red */
1134 regs[0x06] = 0x20; /* Green */
1135 regs[0x07] = 0x20; /* Blue */
1136 } else {
1137 regs[0x10] = 0x00; /* Red and blue */
1138 regs[0x11] = 0x00; /* Green */
1139 }
1140
1141 /* Setup pixel numbers and auto exposure window */
1142 if (sensor_data[sd->sensor].flags & F_SIF) {
1143 regs[0x1a] = 0x14; /* HO_SIZE 640, makes no sense */
1144 regs[0x1b] = 0x0a; /* VO_SIZE 320, makes no sense */
1145 regs[0x1c] = 0x02; /* AE H-start 64 */
1146 regs[0x1d] = 0x02; /* AE V-start 64 */
1147 regs[0x1e] = 0x09; /* AE H-end 288 */
1148 regs[0x1f] = 0x07; /* AE V-end 224 */
1149 } else {
1150 regs[0x1a] = 0x1d; /* HO_SIZE 960, makes no sense */
1151 regs[0x1b] = 0x10; /* VO_SIZE 512, makes no sense */
1152 regs[0x1c] = 0x05; /* AE H-start 160 */
1153 regs[0x1d] = 0x03; /* AE V-start 96 */
1154 regs[0x1e] = 0x0f; /* AE H-end 480 */
1155 regs[0x1f] = 0x0c; /* AE V-end 384 */
1156 }
1157
1158 /* Setup the gamma table (only used with the sn9c103 bridge) */
1159 for (i = 0; i < 16; i++)
1160 regs[0x20 + i] = i * 16;
1161 regs[0x20 + i] = 255;
1162
1163 /* Special cases where some regs depend on mode or bridge */
1140 switch (sd->sensor) { 1164 switch (sd->sensor) {
1141 case SENSOR_TAS5130CXX: 1165 case SENSOR_TAS5130CXX:
1142 /* probably not mode specific at all most likely the upper 1166 /* FIXME / TESTME
1167 probably not mode specific at all most likely the upper
1143 nibble of 0x19 is exposure (clock divider) just as with 1168 nibble of 0x19 is exposure (clock divider) just as with
1144 the tas5110, we need someone to test this. */ 1169 the tas5110, we need someone to test this. */
1145 reg12_19[7] = mode ? 0x23 : 0x43; 1170 regs[0x19] = mode ? 0x23 : 0x43;
1146 break; 1171 break;
1172 case SENSOR_OV7630:
1173 /* FIXME / TESTME for some reason with the 101/102 bridge the
1174 clock is set to 12 Mhz (reg1 == 0x04), rather then 24.
1175 Also the hstart needs to go from 1 to 2 when using a 103,
1176 which is likely related. This does not seem right. */
1177 if (sd->bridge == BRIDGE_103) {
1178 regs[0x01] = 0x44; /* Select 24 Mhz clock */
1179 regs[0x12] = 0x02; /* Set hstart to 2 */
1180 }
1147 } 1181 }
1148 /* Disable compression when the raw bayer format has been selected */ 1182 /* Disable compression when the raw bayer format has been selected */
1149 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW) 1183 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
1150 reg12_19[6] &= ~0x80; 1184 regs[0x18] &= ~0x80;
1151 1185
1152 /* Vga mode emulation on SIF sensor? */ 1186 /* Vga mode emulation on SIF sensor? */
1153 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_REDUCED_SIF) { 1187 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_REDUCED_SIF) {
1154 reg12_19[0] += 16; /* 0x12: hstart adjust */ 1188 regs[0x12] += 16; /* hstart adjust */
1155 reg12_19[1] += 24; /* 0x13: vstart adjust */ 1189 regs[0x13] += 24; /* vstart adjust */
1156 reg12_19[3] = 320 / 16; /* 0x15: hsize */ 1190 regs[0x15] = 320 / 16; /* hsize */
1157 reg12_19[4] = 240 / 16; /* 0x16: vsize */ 1191 regs[0x16] = 240 / 16; /* vsize */
1158 } 1192 }
1159 1193
1160 /* reg 0x01 bit 2 video transfert on */ 1194 /* reg 0x01 bit 2 video transfert on */
1161 reg_w(gspca_dev, 0x01, &sn9c10x[0x01 - 1], 1); 1195 reg_w(gspca_dev, 0x01, &regs[0x01], 1);
1162 /* reg 0x17 SensorClk enable inv Clk 0x60 */ 1196 /* reg 0x17 SensorClk enable inv Clk 0x60 */
1163 reg_w(gspca_dev, 0x17, &sn9c10x[0x17 - 1], 1); 1197 reg_w(gspca_dev, 0x17, &regs[0x17], 1);
1164 /* Set the registers from the template */ 1198 /* Set the registers from the template */
1165 reg_w(gspca_dev, 0x01, sn9c10x, l); 1199 reg_w(gspca_dev, 0x01, &regs[0x01],
1200 (sd->bridge == BRIDGE_103) ? 0x30 : 0x1f);
1166 1201
1167 /* Init the sensor */ 1202 /* Init the sensor */
1168 i2c_w_vector(gspca_dev, sensor_data[sd->sensor].sensor_init, 1203 i2c_w_vector(gspca_dev, sensor_data[sd->sensor].sensor_init,
1169 sensor_data[sd->sensor].sensor_init_size); 1204 sensor_data[sd->sensor].sensor_init_size);
1170 if (sensor_data[sd->sensor].sensor_bridge_init[sd->bridge])
1171 i2c_w_vector(gspca_dev,
1172 sensor_data[sd->sensor].sensor_bridge_init[sd->bridge],
1173 sensor_data[sd->sensor].sensor_bridge_init_size[
1174 sd->bridge]);
1175 1205
1176 /* Mode specific sensor setup */ 1206 /* Mode / bridge specific sensor setup */
1177 switch (sd->sensor) { 1207 switch (sd->sensor) {
1178 case SENSOR_PAS202: { 1208 case SENSOR_PAS202: {
1179 const __u8 i2cpclockdiv[] = 1209 const __u8 i2cpclockdiv[] =
@@ -1181,27 +1211,37 @@ static int sd_start(struct gspca_dev *gspca_dev)
1181 /* clockdiv from 4 to 3 (7.5 -> 10 fps) when in low res mode */ 1211 /* clockdiv from 4 to 3 (7.5 -> 10 fps) when in low res mode */
1182 if (mode) 1212 if (mode)
1183 i2c_w(gspca_dev, i2cpclockdiv); 1213 i2c_w(gspca_dev, i2cpclockdiv);
1214 break;
1184 } 1215 }
1216 case SENSOR_OV7630:
1217 /* FIXME / TESTME We should be able to handle this identical
1218 for the 101/102 and the 103 case */
1219 if (sd->bridge == BRIDGE_103) {
1220 const __u8 i2c[] = { 0xa0, 0x21, 0x13,
1221 0x80, 0x00, 0x00, 0x00, 0x10 };
1222 i2c_w(gspca_dev, i2c);
1223 }
1224 break;
1185 } 1225 }
1186 /* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */ 1226 /* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */
1187 reg_w(gspca_dev, 0x15, &reg12_19[3], 2); 1227 reg_w(gspca_dev, 0x15, &regs[0x15], 2);
1188 /* compression register */ 1228 /* compression register */
1189 reg_w(gspca_dev, 0x18, &reg12_19[6], 1); 1229 reg_w(gspca_dev, 0x18, &regs[0x18], 1);
1190 /* H_start */ 1230 /* H_start */
1191 reg_w(gspca_dev, 0x12, &reg12_19[0], 1); 1231 reg_w(gspca_dev, 0x12, &regs[0x12], 1);
1192 /* V_START */ 1232 /* V_START */
1193 reg_w(gspca_dev, 0x13, &reg12_19[1], 1); 1233 reg_w(gspca_dev, 0x13, &regs[0x13], 1);
1194 /* reset 0x17 SensorClk enable inv Clk 0x60 */ 1234 /* reset 0x17 SensorClk enable inv Clk 0x60 */
1195 /*fixme: ov7630 [17]=68 8f (+20 if 102)*/ 1235 /*fixme: ov7630 [17]=68 8f (+20 if 102)*/
1196 reg_w(gspca_dev, 0x17, &reg12_19[5], 1); 1236 reg_w(gspca_dev, 0x17, &regs[0x17], 1);
1197 /*MCKSIZE ->3 */ /*fixme: not ov7630*/ 1237 /*MCKSIZE ->3 */ /*fixme: not ov7630*/
1198 reg_w(gspca_dev, 0x19, &reg12_19[7], 1); 1238 reg_w(gspca_dev, 0x19, &regs[0x19], 1);
1199 /* AE_STRX AE_STRY AE_ENDX AE_ENDY */ 1239 /* AE_STRX AE_STRY AE_ENDX AE_ENDY */
1200 reg_w(gspca_dev, 0x1c, &sn9c10x[0x1c - 1], 4); 1240 reg_w(gspca_dev, 0x1c, &regs[0x1c], 4);
1201 /* Enable video transfert */ 1241 /* Enable video transfert */
1202 reg_w(gspca_dev, 0x01, &sn9c10x[0], 1); 1242 reg_w(gspca_dev, 0x01, &regs[0x01], 1);
1203 /* Compression */ 1243 /* Compression */
1204 reg_w(gspca_dev, 0x18, &reg12_19[6], 2); 1244 reg_w(gspca_dev, 0x18, &regs[0x18], 2);
1205 msleep(20); 1245 msleep(20);
1206 1246
1207 sd->reg11 = -1; 1247 sd->reg11 = -1;
@@ -1525,15 +1565,15 @@ static const struct sd_desc sd_desc = {
1525 .driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge 1565 .driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge
1526 1566
1527 1567
1528static const struct usb_device_id device_table[] __devinitconst = { 1568static const struct usb_device_id device_table[] = {
1529 {USB_DEVICE(0x0c45, 0x6001), SB(TAS5110C, 102)}, /* TAS5110C1B */ 1569 {USB_DEVICE(0x0c45, 0x6001), SB(TAS5110C, 102)}, /* TAS5110C1B */
1530 {USB_DEVICE(0x0c45, 0x6005), SB(TAS5110C, 101)}, /* TAS5110C1B */ 1570 {USB_DEVICE(0x0c45, 0x6005), SB(TAS5110C, 101)}, /* TAS5110C1B */
1531 {USB_DEVICE(0x0c45, 0x6007), SB(TAS5110D, 101)}, /* TAS5110D */ 1571 {USB_DEVICE(0x0c45, 0x6007), SB(TAS5110D, 101)}, /* TAS5110D */
1532 {USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)}, 1572 {USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)},
1533 {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, 1573 {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
1534 {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, 1574 {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
1535#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1536 {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, 1575 {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
1576#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1537 {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, 1577 {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
1538 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, 1578 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
1539#endif 1579#endif
@@ -1544,18 +1584,22 @@ static const struct usb_device_id device_table[] __devinitconst = {
1544 {USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)}, 1584 {USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
1545 {USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)}, 1585 {USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)},
1546 {USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)}, 1586 {USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)},
1547 /* {USB_DEVICE(0x0c45, 0x602b), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */ 1587 /* {USB_DEVICE(0x0c45, 0x6030), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
1588 /* {USB_DEVICE(0x0c45, 0x6082), SB(MI03XX, 103)}, */ /* MI0343 MI0360 */
1589 {USB_DEVICE(0x0c45, 0x6083), SB(HV7131D, 103)},
1590 {USB_DEVICE(0x0c45, 0x608c), SB(HV7131R, 103)},
1591 /* {USB_DEVICE(0x0c45, 0x608e), SB(CISVF10, 103)}, */
1548 {USB_DEVICE(0x0c45, 0x608f), SB(OV7630, 103)}, 1592 {USB_DEVICE(0x0c45, 0x608f), SB(OV7630, 103)},
1549#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE 1593 {USB_DEVICE(0x0c45, 0x60a8), SB(PAS106, 103)},
1594 {USB_DEVICE(0x0c45, 0x60aa), SB(TAS5130CXX, 103)},
1550 {USB_DEVICE(0x0c45, 0x60af), SB(PAS202, 103)}, 1595 {USB_DEVICE(0x0c45, 0x60af), SB(PAS202, 103)},
1551#endif
1552 {USB_DEVICE(0x0c45, 0x60b0), SB(OV7630, 103)}, 1596 {USB_DEVICE(0x0c45, 0x60b0), SB(OV7630, 103)},
1553 {} 1597 {}
1554}; 1598};
1555MODULE_DEVICE_TABLE(usb, device_table); 1599MODULE_DEVICE_TABLE(usb, device_table);
1556 1600
1557/* -- device connect -- */ 1601/* -- device connect -- */
1558static int __devinit sd_probe(struct usb_interface *intf, 1602static int sd_probe(struct usb_interface *intf,
1559 const struct usb_device_id *id) 1603 const struct usb_device_id *id)
1560{ 1604{
1561 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 1605 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 2d0bb17a30a2..d6f39ce1b7e1 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -25,12 +25,12 @@
25#include "gspca.h" 25#include "gspca.h"
26#include "jpeg.h" 26#include "jpeg.h"
27 27
28#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
29
30MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>"); 28MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
31MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver"); 29MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
32MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
33 31
32static int starcam;
33
34/* controls */ 34/* controls */
35enum e_ctrl { 35enum e_ctrl {
36 BRIGHTNESS, 36 BRIGHTNESS,
@@ -43,7 +43,7 @@ enum e_ctrl {
43 HFLIP, 43 HFLIP,
44 VFLIP, 44 VFLIP,
45 SHARPNESS, 45 SHARPNESS,
46 INFRARED, 46 ILLUM,
47 FREQ, 47 FREQ,
48 NCTRLS /* number of controls */ 48 NCTRLS /* number of controls */
49}; 49};
@@ -100,7 +100,8 @@ enum sensors {
100}; 100};
101 101
102/* device flags */ 102/* device flags */
103#define PDN_INV 1 /* inverse pin S_PWR_DN / sn_xxx tables */ 103#define F_PDN_INV 0x01 /* inverse pin S_PWR_DN / sn_xxx tables */
104#define F_ILLUM 0x02 /* presence of illuminator */
104 105
105/* sn9c1xx definitions */ 106/* sn9c1xx definitions */
106/* register 0x01 */ 107/* register 0x01 */
@@ -124,7 +125,7 @@ static void setgamma(struct gspca_dev *gspca_dev);
124static void setautogain(struct gspca_dev *gspca_dev); 125static void setautogain(struct gspca_dev *gspca_dev);
125static void sethvflip(struct gspca_dev *gspca_dev); 126static void sethvflip(struct gspca_dev *gspca_dev);
126static void setsharpness(struct gspca_dev *gspca_dev); 127static void setsharpness(struct gspca_dev *gspca_dev);
127static void setinfrared(struct gspca_dev *gspca_dev); 128static void setillum(struct gspca_dev *gspca_dev);
128static void setfreq(struct gspca_dev *gspca_dev); 129static void setfreq(struct gspca_dev *gspca_dev);
129 130
130static const struct ctrl sd_ctrls[NCTRLS] = { 131static const struct ctrl sd_ctrls[NCTRLS] = {
@@ -251,18 +252,17 @@ static const struct ctrl sd_ctrls[NCTRLS] = {
251 }, 252 },
252 .set_control = setsharpness 253 .set_control = setsharpness
253 }, 254 },
254/* mt9v111 only */ 255[ILLUM] = {
255[INFRARED] = {
256 { 256 {
257 .id = V4L2_CID_INFRARED, 257 .id = V4L2_CID_ILLUMINATORS_1,
258 .type = V4L2_CTRL_TYPE_BOOLEAN, 258 .type = V4L2_CTRL_TYPE_BOOLEAN,
259 .name = "Infrared", 259 .name = "Illuminator / infrared",
260 .minimum = 0, 260 .minimum = 0,
261 .maximum = 1, 261 .maximum = 1,
262 .step = 1, 262 .step = 1,
263 .default_value = 0, 263 .default_value = 0,
264 }, 264 },
265 .set_control = setinfrared 265 .set_control = setillum
266 }, 266 },
267/* ov7630/ov7648/ov7660 only */ 267/* ov7630/ov7648/ov7660 only */
268[FREQ] = { 268[FREQ] = {
@@ -282,32 +282,26 @@ static const struct ctrl sd_ctrls[NCTRLS] = {
282/* table of the disabled controls */ 282/* table of the disabled controls */
283static const __u32 ctrl_dis[] = { 283static const __u32 ctrl_dis[] = {
284[SENSOR_ADCM1700] = (1 << AUTOGAIN) | 284[SENSOR_ADCM1700] = (1 << AUTOGAIN) |
285 (1 << INFRARED) |
286 (1 << HFLIP) | 285 (1 << HFLIP) |
287 (1 << VFLIP) | 286 (1 << VFLIP) |
288 (1 << FREQ), 287 (1 << FREQ),
289 288
290[SENSOR_GC0307] = (1 << INFRARED) | 289[SENSOR_GC0307] = (1 << HFLIP) |
291 (1 << HFLIP) |
292 (1 << VFLIP) | 290 (1 << VFLIP) |
293 (1 << FREQ), 291 (1 << FREQ),
294 292
295[SENSOR_HV7131R] = (1 << INFRARED) | 293[SENSOR_HV7131R] = (1 << HFLIP) |
296 (1 << HFLIP) |
297 (1 << FREQ), 294 (1 << FREQ),
298 295
299[SENSOR_MI0360] = (1 << INFRARED) | 296[SENSOR_MI0360] = (1 << HFLIP) |
300 (1 << HFLIP) |
301 (1 << VFLIP) | 297 (1 << VFLIP) |
302 (1 << FREQ), 298 (1 << FREQ),
303 299
304[SENSOR_MI0360B] = (1 << INFRARED) | 300[SENSOR_MI0360B] = (1 << HFLIP) |
305 (1 << HFLIP) |
306 (1 << VFLIP) | 301 (1 << VFLIP) |
307 (1 << FREQ), 302 (1 << FREQ),
308 303
309[SENSOR_MO4000] = (1 << INFRARED) | 304[SENSOR_MO4000] = (1 << HFLIP) |
310 (1 << HFLIP) |
311 (1 << VFLIP) | 305 (1 << VFLIP) |
312 (1 << FREQ), 306 (1 << FREQ),
313 307
@@ -315,40 +309,32 @@ static const __u32 ctrl_dis[] = {
315 (1 << VFLIP) | 309 (1 << VFLIP) |
316 (1 << FREQ), 310 (1 << FREQ),
317 311
318[SENSOR_OM6802] = (1 << INFRARED) | 312[SENSOR_OM6802] = (1 << HFLIP) |
319 (1 << HFLIP) |
320 (1 << VFLIP) | 313 (1 << VFLIP) |
321 (1 << FREQ), 314 (1 << FREQ),
322 315
323[SENSOR_OV7630] = (1 << INFRARED) | 316[SENSOR_OV7630] = (1 << HFLIP),
324 (1 << HFLIP),
325 317
326[SENSOR_OV7648] = (1 << INFRARED) | 318[SENSOR_OV7648] = (1 << HFLIP),
327 (1 << HFLIP),
328 319
329[SENSOR_OV7660] = (1 << AUTOGAIN) | 320[SENSOR_OV7660] = (1 << AUTOGAIN) |
330 (1 << INFRARED) |
331 (1 << HFLIP) | 321 (1 << HFLIP) |
332 (1 << VFLIP), 322 (1 << VFLIP),
333 323
334[SENSOR_PO1030] = (1 << AUTOGAIN) | 324[SENSOR_PO1030] = (1 << AUTOGAIN) |
335 (1 << INFRARED) |
336 (1 << HFLIP) | 325 (1 << HFLIP) |
337 (1 << VFLIP) | 326 (1 << VFLIP) |
338 (1 << FREQ), 327 (1 << FREQ),
339 328
340[SENSOR_PO2030N] = (1 << AUTOGAIN) | 329[SENSOR_PO2030N] = (1 << AUTOGAIN) |
341 (1 << INFRARED) |
342 (1 << FREQ), 330 (1 << FREQ),
343 331
344[SENSOR_SOI768] = (1 << AUTOGAIN) | 332[SENSOR_SOI768] = (1 << AUTOGAIN) |
345 (1 << INFRARED) |
346 (1 << HFLIP) | 333 (1 << HFLIP) |
347 (1 << VFLIP) | 334 (1 << VFLIP) |
348 (1 << FREQ), 335 (1 << FREQ),
349 336
350[SENSOR_SP80708] = (1 << AUTOGAIN) | 337[SENSOR_SP80708] = (1 << AUTOGAIN) |
351 (1 << INFRARED) |
352 (1 << HFLIP) | 338 (1 << HFLIP) |
353 (1 << VFLIP) | 339 (1 << VFLIP) |
354 (1 << FREQ), 340 (1 << FREQ),
@@ -1822,44 +1808,46 @@ static int sd_init(struct gspca_dev *gspca_dev)
1822 PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1); 1808 PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1);
1823 switch (sd->bridge) { 1809 switch (sd->bridge) {
1824 case BRIDGE_SN9C102P: 1810 case BRIDGE_SN9C102P:
1811 case BRIDGE_SN9C105:
1825 if (regF1 != 0x11) 1812 if (regF1 != 0x11)
1826 return -ENODEV; 1813 return -ENODEV;
1814 break;
1815 default:
1816/* case BRIDGE_SN9C110: */
1817/* case BRIDGE_SN9C120: */
1818 if (regF1 != 0x12)
1819 return -ENODEV;
1820 }
1821
1822 switch (sd->sensor) {
1823 case SENSOR_MI0360:
1824 mi0360_probe(gspca_dev);
1825 break;
1826 case SENSOR_OV7630:
1827 ov7630_probe(gspca_dev);
1828 break;
1829 case SENSOR_OV7648:
1830 ov7648_probe(gspca_dev);
1831 break;
1832 case SENSOR_PO2030N:
1833 po2030n_probe(gspca_dev);
1834 break;
1835 }
1836
1837 switch (sd->bridge) {
1838 case BRIDGE_SN9C102P:
1827 reg_w1(gspca_dev, 0x02, regGpio[1]); 1839 reg_w1(gspca_dev, 0x02, regGpio[1]);
1828 break; 1840 break;
1829 case BRIDGE_SN9C105: 1841 case BRIDGE_SN9C105:
1830 if (regF1 != 0x11)
1831 return -ENODEV;
1832 if (sd->sensor == SENSOR_MI0360)
1833 mi0360_probe(gspca_dev);
1834 reg_w(gspca_dev, 0x01, regGpio, 2); 1842 reg_w(gspca_dev, 0x01, regGpio, 2);
1835 break; 1843 break;
1844 case BRIDGE_SN9C110:
1845 reg_w1(gspca_dev, 0x02, 0x62);
1846 break;
1836 case BRIDGE_SN9C120: 1847 case BRIDGE_SN9C120:
1837 if (regF1 != 0x12)
1838 return -ENODEV;
1839 switch (sd->sensor) {
1840 case SENSOR_MI0360:
1841 mi0360_probe(gspca_dev);
1842 break;
1843 case SENSOR_OV7630:
1844 ov7630_probe(gspca_dev);
1845 break;
1846 case SENSOR_OV7648:
1847 ov7648_probe(gspca_dev);
1848 break;
1849 case SENSOR_PO2030N:
1850 po2030n_probe(gspca_dev);
1851 break;
1852 }
1853 regGpio[1] = 0x70; /* no audio */ 1848 regGpio[1] = 0x70; /* no audio */
1854 reg_w(gspca_dev, 0x01, regGpio, 2); 1849 reg_w(gspca_dev, 0x01, regGpio, 2);
1855 break; 1850 break;
1856 default:
1857/* case BRIDGE_SN9C110: */
1858/* case BRIDGE_SN9C325: */
1859 if (regF1 != 0x12)
1860 return -ENODEV;
1861 reg_w1(gspca_dev, 0x02, 0x62);
1862 break;
1863 } 1851 }
1864 1852
1865 if (sd->sensor == SENSOR_OM6802) 1853 if (sd->sensor == SENSOR_OM6802)
@@ -1874,6 +1862,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
1874 sd->i2c_addr = sn9c1xx[9]; 1862 sd->i2c_addr = sn9c1xx[9];
1875 1863
1876 gspca_dev->ctrl_dis = ctrl_dis[sd->sensor]; 1864 gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
1865 if (!(sd->flags & F_ILLUM))
1866 gspca_dev->ctrl_dis |= (1 << ILLUM);
1877 1867
1878 return gspca_dev->usb_err; 1868 return gspca_dev->usb_err;
1879} 1869}
@@ -2197,16 +2187,28 @@ static void setsharpness(struct gspca_dev *gspca_dev)
2197 reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val); 2187 reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val);
2198} 2188}
2199 2189
2200static void setinfrared(struct gspca_dev *gspca_dev) 2190static void setillum(struct gspca_dev *gspca_dev)
2201{ 2191{
2202 struct sd *sd = (struct sd *) gspca_dev; 2192 struct sd *sd = (struct sd *) gspca_dev;
2203 2193
2204 if (gspca_dev->ctrl_dis & (1 << INFRARED)) 2194 if (gspca_dev->ctrl_dis & (1 << ILLUM))
2205 return; 2195 return;
2206/*fixme: different sequence for StarCam Clip and StarCam 370i */ 2196 switch (sd->sensor) {
2207/* Clip */ 2197 case SENSOR_ADCM1700:
2208 i2c_w1(gspca_dev, 0x02, /* gpio */ 2198 reg_w1(gspca_dev, 0x02, /* gpio */
2209 sd->ctrls[INFRARED].val ? 0x66 : 0x64); 2199 sd->ctrls[ILLUM].val ? 0x64 : 0x60);
2200 break;
2201 case SENSOR_MT9V111:
2202 if (starcam)
2203 reg_w1(gspca_dev, 0x02,
2204 sd->ctrls[ILLUM].val ?
2205 0x55 : 0x54); /* 370i */
2206 else
2207 reg_w1(gspca_dev, 0x02,
2208 sd->ctrls[ILLUM].val ?
2209 0x66 : 0x64); /* Clip */
2210 break;
2211 }
2210} 2212}
2211 2213
2212static void setfreq(struct gspca_dev *gspca_dev) 2214static void setfreq(struct gspca_dev *gspca_dev)
@@ -2344,7 +2346,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
2344 /* sensor clock already enabled in sd_init */ 2346 /* sensor clock already enabled in sd_init */
2345 /* reg_w1(gspca_dev, 0xf1, 0x00); */ 2347 /* reg_w1(gspca_dev, 0xf1, 0x00); */
2346 reg01 = sn9c1xx[1]; 2348 reg01 = sn9c1xx[1];
2347 if (sd->flags & PDN_INV) 2349 if (sd->flags & F_PDN_INV)
2348 reg01 ^= S_PDN_INV; /* power down inverted */ 2350 reg01 ^= S_PDN_INV; /* power down inverted */
2349 reg_w1(gspca_dev, 0x01, reg01); 2351 reg_w1(gspca_dev, 0x01, reg01);
2350 2352
@@ -2907,13 +2909,11 @@ static const struct sd_desc sd_desc = {
2907 .driver_info = (BRIDGE_ ## bridge << 16) \ 2909 .driver_info = (BRIDGE_ ## bridge << 16) \
2908 | (SENSOR_ ## sensor << 8) \ 2910 | (SENSOR_ ## sensor << 8) \
2909 | (flags) 2911 | (flags)
2910static const __devinitdata struct usb_device_id device_table[] = { 2912static const struct usb_device_id device_table[] = {
2911#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
2912 {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)}, 2913 {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
2913 {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)}, 2914 {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
2914#endif 2915 {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
2915 {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)}, 2916 {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
2916 {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)},
2917 {USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)}, 2917 {USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)},
2918 {USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)}, 2918 {USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)},
2919 {USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)}, 2919 {USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
@@ -2925,7 +2925,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
2925/* {USB_DEVICE(0x0c45, 0x607b), BS(SN9C102P, OV7660)}, */ 2925/* {USB_DEVICE(0x0c45, 0x607b), BS(SN9C102P, OV7660)}, */
2926 {USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)}, 2926 {USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)},
2927/* {USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */ 2927/* {USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */
2928 {USB_DEVICE(0x0c45, 0x60c0), BS(SN9C105, MI0360)}, 2928 {USB_DEVICE(0x0c45, 0x60c0), BSF(SN9C105, MI0360, F_ILLUM)},
2929 /* or MT9V111 */ 2929 /* or MT9V111 */
2930/* {USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */ 2930/* {USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */
2931/* {USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */ 2931/* {USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */
@@ -2936,10 +2936,8 @@ static const __devinitdata struct usb_device_id device_table[] = {
2936/* {USB_DEVICE(0x0c45, 0x60fa), BS(SN9C105, OV7648)}, */ 2936/* {USB_DEVICE(0x0c45, 0x60fa), BS(SN9C105, OV7648)}, */
2937/* {USB_DEVICE(0x0c45, 0x60f2), BS(SN9C105, OV7660)}, */ 2937/* {USB_DEVICE(0x0c45, 0x60f2), BS(SN9C105, OV7660)}, */
2938 {USB_DEVICE(0x0c45, 0x60fb), BS(SN9C105, OV7660)}, 2938 {USB_DEVICE(0x0c45, 0x60fb), BS(SN9C105, OV7660)},
2939#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
2940 {USB_DEVICE(0x0c45, 0x60fc), BS(SN9C105, HV7131R)}, 2939 {USB_DEVICE(0x0c45, 0x60fc), BS(SN9C105, HV7131R)},
2941 {USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)}, 2940 {USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)},
2942#endif
2943 {USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)}, /*sn9c128*/ 2941 {USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)}, /*sn9c128*/
2944 {USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, /* /GC0305*/ 2942 {USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, /* /GC0305*/
2945/* {USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */ 2943/* {USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */
@@ -2962,16 +2960,15 @@ static const __devinitdata struct usb_device_id device_table[] = {
2962/* {USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */ 2960/* {USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */
2963 {USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)}, 2961 {USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)},
2964 {USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)}, 2962 {USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)},
2965#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
2966 {USB_DEVICE(0x0c45, 0x613b), BS(SN9C120, OV7660)}, 2963 {USB_DEVICE(0x0c45, 0x613b), BS(SN9C120, OV7660)},
2967#endif
2968 {USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)}, 2964 {USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)},
2969 {USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)}, 2965 {USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)},
2970 {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/ 2966 {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/
2971 /* or GC0305 / GC0307 */ 2967 /* or GC0305 / GC0307 */
2972 {USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/ 2968 {USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/
2973 {USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/ 2969 {USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/
2974 {USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)}, /*sn9c120b*/ 2970 {USB_DEVICE(0x0c45, 0x614a), BSF(SN9C120, ADCM1700, F_ILLUM)},
2971/* {USB_DEVICE(0x0c45, 0x614c), BS(SN9C120, GC0306)}, */ /*sn9c120b*/
2975 {} 2972 {}
2976}; 2973};
2977MODULE_DEVICE_TABLE(usb, device_table); 2974MODULE_DEVICE_TABLE(usb, device_table);
@@ -3007,3 +3004,7 @@ static void __exit sd_mod_exit(void)
3007 3004
3008module_init(sd_mod_init); 3005module_init(sd_mod_init);
3009module_exit(sd_mod_exit); 3006module_exit(sd_mod_exit);
3007
3008module_param(starcam, int, 0644);
3009MODULE_PARM_DESC(starcam,
3010 "StarCam model. 0: Clip, 1: 370i");
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index e64338664410..76c006b2bc83 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -555,7 +555,7 @@ static const struct sd_desc sd_desc = {
555}; 555};
556 556
557/* -- module initialisation -- */ 557/* -- module initialisation -- */
558static const __devinitdata struct usb_device_id device_table[] = { 558static const struct usb_device_id device_table[] = {
559 {USB_DEVICE(0x04fc, 0x1528)}, 559 {USB_DEVICE(0x04fc, 0x1528)},
560 {} 560 {}
561}; 561};
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 8e202b9039f1..45552c3ff8d9 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -1051,7 +1051,7 @@ static const struct sd_desc sd_desc = {
1051}; 1051};
1052 1052
1053/* -- module initialisation -- */ 1053/* -- module initialisation -- */
1054static const __devinitdata struct usb_device_id device_table[] = { 1054static const struct usb_device_id device_table[] = {
1055 {USB_DEVICE(0x040a, 0x0300), .driver_info = KodakEZ200}, 1055 {USB_DEVICE(0x040a, 0x0300), .driver_info = KodakEZ200},
1056 {USB_DEVICE(0x041e, 0x400a), .driver_info = CreativePCCam300}, 1056 {USB_DEVICE(0x041e, 0x400a), .driver_info = CreativePCCam300},
1057 {USB_DEVICE(0x046d, 0x0890), .driver_info = LogitechTraveler}, 1057 {USB_DEVICE(0x046d, 0x0890), .driver_info = LogitechTraveler},
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 642839a11e8d..f7ef282cc600 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2155,7 +2155,7 @@ static const struct sd_desc sd_desc = {
2155}; 2155};
2156 2156
2157/* -- module initialisation -- */ 2157/* -- module initialisation -- */
2158static const __devinitdata struct usb_device_id device_table[] = { 2158static const struct usb_device_id device_table[] = {
2159 {USB_DEVICE(0x040a, 0x0002), .driver_info = KodakDVC325}, 2159 {USB_DEVICE(0x040a, 0x0002), .driver_info = KodakDVC325},
2160 {USB_DEVICE(0x0497, 0xc001), .driver_info = SmileIntlCamera}, 2160 {USB_DEVICE(0x0497, 0xc001), .driver_info = SmileIntlCamera},
2161 {USB_DEVICE(0x0506, 0x00df), .driver_info = ThreeComHomeConnectLite}, 2161 {USB_DEVICE(0x0506, 0x00df), .driver_info = ThreeComHomeConnectLite},
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index bc9dd9034ab4..e5bf865147d7 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -786,7 +786,7 @@ static const struct sd_desc sd_desc = {
786}; 786};
787 787
788/* -- module initialisation -- */ 788/* -- module initialisation -- */
789static const __devinitdata struct usb_device_id device_table[] = { 789static const struct usb_device_id device_table[] = {
790 {USB_DEVICE(0x041e, 0x401d), .driver_info = Nxultra}, 790 {USB_DEVICE(0x041e, 0x401d), .driver_info = Nxultra},
791 {USB_DEVICE(0x0733, 0x0430), .driver_info = IntelPCCameraPro}, 791 {USB_DEVICE(0x0733, 0x0430), .driver_info = IntelPCCameraPro},
792/*fixme: may be UsbGrabberPV321 BRIDGE_SPCA506 SENSOR_SAA7113 */ 792/*fixme: may be UsbGrabberPV321 BRIDGE_SPCA506 SENSOR_SAA7113 */
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 7307638ac91d..348319371523 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1509,7 +1509,7 @@ static const struct sd_desc sd_desc = {
1509}; 1509};
1510 1510
1511/* -- module initialisation -- */ 1511/* -- module initialisation -- */
1512static const __devinitdata struct usb_device_id device_table[] = { 1512static const struct usb_device_id device_table[] = {
1513 {USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam}, 1513 {USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam},
1514 {USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista}, 1514 {USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista},
1515 {USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110}, 1515 {USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110},
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 3a162c6d5466..e836e778dfb6 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1061,7 +1061,7 @@ static const struct sd_desc *sd_desc[2] = {
1061}; 1061};
1062 1062
1063/* -- module initialisation -- */ 1063/* -- module initialisation -- */
1064static const __devinitdata struct usb_device_id device_table[] = { 1064static const struct usb_device_id device_table[] = {
1065 {USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A}, 1065 {USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A},
1066 {USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A}, 1066 {USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A},
1067 {USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A}, 1067 {USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A},
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 404067745775..2e9c06175192 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -396,7 +396,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
396} 396}
397 397
398/* Table of supported USB devices */ 398/* Table of supported USB devices */
399static const __devinitdata struct usb_device_id device_table[] = { 399static const struct usb_device_id device_table[] = {
400 {USB_DEVICE(0x2770, 0x9120)}, 400 {USB_DEVICE(0x2770, 0x9120)},
401 {} 401 {}
402}; 402};
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 8ba199543856..457563b7a71b 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -298,7 +298,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
298} 298}
299 299
300/* Table of supported USB devices */ 300/* Table of supported USB devices */
301static const __devinitdata struct usb_device_id device_table[] = { 301static const struct usb_device_id device_table[] = {
302 {USB_DEVICE(0x2770, 0x905c)}, 302 {USB_DEVICE(0x2770, 0x905c)},
303 {USB_DEVICE(0x2770, 0x9050)}, 303 {USB_DEVICE(0x2770, 0x9050)},
304 {USB_DEVICE(0x2770, 0x9051)}, 304 {USB_DEVICE(0x2770, 0x9051)},
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index a4a98811b9e3..8215d5dcd456 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -1163,7 +1163,7 @@ static const struct sd_desc sd_desc = {
1163#define ST(sensor, type) \ 1163#define ST(sensor, type) \
1164 .driver_info = (SENSOR_ ## sensor << 8) \ 1164 .driver_info = (SENSOR_ ## sensor << 8) \
1165 | (type) 1165 | (type)
1166static const __devinitdata struct usb_device_id device_table[] = { 1166static const struct usb_device_id device_table[] = {
1167 {USB_DEVICE(0x041e, 0x4038), ST(MI0360, 0)}, 1167 {USB_DEVICE(0x041e, 0x4038), ST(MI0360, 0)},
1168 {USB_DEVICE(0x041e, 0x403c), ST(LZ24BP, 0)}, 1168 {USB_DEVICE(0x041e, 0x403c), ST(LZ24BP, 0)},
1169 {USB_DEVICE(0x041e, 0x403d), ST(LZ24BP, 0)}, 1169 {USB_DEVICE(0x041e, 0x403d), ST(LZ24BP, 0)},
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 11a192b95ed4..87be52b5e1e3 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -495,7 +495,7 @@ static const struct sd_desc sd_desc = {
495}; 495};
496 496
497/* -- module initialisation -- */ 497/* -- module initialisation -- */
498static const __devinitdata struct usb_device_id device_table[] = { 498static const struct usb_device_id device_table[] = {
499 {USB_DEVICE(0x05e1, 0x0893)}, 499 {USB_DEVICE(0x05e1, 0x0893)},
500 {} 500 {}
501}; 501};
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index b199ad4666bd..e2ef41cf72d7 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -327,7 +327,7 @@ static const struct sd_desc sd_desc = {
327}; 327};
328 328
329/* -- module initialisation -- */ 329/* -- module initialisation -- */
330static const __devinitdata struct usb_device_id device_table[] = { 330static const struct usb_device_id device_table[] = {
331 {USB_DEVICE(0x0553, 0x0202)}, 331 {USB_DEVICE(0x0553, 0x0202)},
332 {USB_DEVICE(0x041e, 0x4007)}, 332 {USB_DEVICE(0x041e, 0x4007)},
333 {} 333 {}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 28ea4175b80e..7e0661429293 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -564,7 +564,7 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
564 564
565 565
566/* -- module initialisation -- */ 566/* -- module initialisation -- */
567static const __devinitdata struct usb_device_id device_table[] = { 567static const struct usb_device_id device_table[] = {
568 /* QuickCam Express */ 568 /* QuickCam Express */
569 {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 }, 569 {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
570 /* LEGO cam / QuickCam Web */ 570 /* LEGO cam / QuickCam Web */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index a9cbcd6011d9..543542af2720 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1162,7 +1162,7 @@ static const struct sd_desc sd_desc = {
1162#define BS(bridge, subtype) \ 1162#define BS(bridge, subtype) \
1163 .driver_info = (BRIDGE_ ## bridge << 8) \ 1163 .driver_info = (BRIDGE_ ## bridge << 8) \
1164 | (subtype) 1164 | (subtype)
1165static const __devinitdata struct usb_device_id device_table[] = { 1165static const struct usb_device_id device_table[] = {
1166 {USB_DEVICE(0x041e, 0x400b), BS(SPCA504C, 0)}, 1166 {USB_DEVICE(0x041e, 0x400b), BS(SPCA504C, 0)},
1167 {USB_DEVICE(0x041e, 0x4012), BS(SPCA504C, 0)}, 1167 {USB_DEVICE(0x041e, 0x4012), BS(SPCA504C, 0)},
1168 {USB_DEVICE(0x041e, 0x4013), BS(SPCA504C, 0)}, 1168 {USB_DEVICE(0x041e, 0x4013), BS(SPCA504C, 0)},
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 8f0c33116e0d..a3eccd815766 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1416,7 +1416,7 @@ static const struct sd_desc sd_desc = {
1416}; 1416};
1417 1417
1418/* -- module initialisation -- */ 1418/* -- module initialisation -- */
1419static const __devinitdata struct usb_device_id device_table[] = { 1419static const struct usb_device_id device_table[] = {
1420 {USB_DEVICE(0x17a1, 0x0128)}, 1420 {USB_DEVICE(0x17a1, 0x0128)},
1421 {} 1421 {}
1422}; 1422};
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 38c22f0a4263..933ef2ca658c 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -388,7 +388,7 @@ static const struct sd_desc sd_desc = {
388}; 388};
389 389
390/* -- module initialisation -- */ 390/* -- module initialisation -- */
391static const __devinitdata struct usb_device_id device_table[] = { 391static const struct usb_device_id device_table[] = {
392 {USB_DEVICE(0x046d, 0x0920)}, 392 {USB_DEVICE(0x046d, 0x0920)},
393 {USB_DEVICE(0x046d, 0x0921)}, 393 {USB_DEVICE(0x046d, 0x0921)},
394 {USB_DEVICE(0x0545, 0x808b)}, 394 {USB_DEVICE(0x0545, 0x808b)},
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 9b2ae1b6cc75..6caed734a06a 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -4192,7 +4192,7 @@ static const struct sd_desc sd_desc = {
4192#define BF(bridge, flags) \ 4192#define BF(bridge, flags) \
4193 .driver_info = (BRIDGE_ ## bridge << 8) \ 4193 .driver_info = (BRIDGE_ ## bridge << 8) \
4194 | (flags) 4194 | (flags)
4195static const __devinitdata struct usb_device_id device_table[] = { 4195static const struct usb_device_id device_table[] = {
4196 {USB_DEVICE(0x041e, 0x405b), BF(VC0323, FL_VFLIP)}, 4196 {USB_DEVICE(0x041e, 0x405b), BF(VC0323, FL_VFLIP)},
4197 {USB_DEVICE(0x046d, 0x0892), BF(VC0321, 0)}, 4197 {USB_DEVICE(0x046d, 0x0892), BF(VC0321, 0)},
4198 {USB_DEVICE(0x046d, 0x0896), BF(VC0321, 0)}, 4198 {USB_DEVICE(0x046d, 0x0896), BF(VC0321, 0)},
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index 5b5039a02031..c089a0f6f1d0 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -3270,7 +3270,7 @@ static const struct sd_desc sd_desc_isoc_nego = {
3270}; 3270};
3271 3271
3272/* -- module initialisation -- */ 3272/* -- module initialisation -- */
3273static const __devinitdata struct usb_device_id device_table[] = { 3273static const struct usb_device_id device_table[] = {
3274 { USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 }, 3274 { USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 },
3275 { USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 }, 3275 { USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 },
3276 { USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 }, 3276 { USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 14b85d483163..47236a58bf33 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5793,7 +5793,7 @@ static void usb_exchange(struct gspca_dev *gspca_dev,
5793 break; 5793 break;
5794 default: 5794 default:
5795/* case 0xdd: * delay */ 5795/* case 0xdd: * delay */
5796 msleep(action->val / 64 + 10); 5796 msleep(action->idx);
5797 break; 5797 break;
5798 } 5798 }
5799 action++; 5799 action++;
@@ -5830,7 +5830,7 @@ static void setmatrix(struct gspca_dev *gspca_dev)
5830 [SENSOR_GC0305] = gc0305_matrix, 5830 [SENSOR_GC0305] = gc0305_matrix,
5831 [SENSOR_HDCS2020b] = NULL, 5831 [SENSOR_HDCS2020b] = NULL,
5832 [SENSOR_HV7131B] = NULL, 5832 [SENSOR_HV7131B] = NULL,
5833 [SENSOR_HV7131R] = NULL, 5833 [SENSOR_HV7131R] = po2030_matrix,
5834 [SENSOR_ICM105A] = po2030_matrix, 5834 [SENSOR_ICM105A] = po2030_matrix,
5835 [SENSOR_MC501CB] = NULL, 5835 [SENSOR_MC501CB] = NULL,
5836 [SENSOR_MT9V111_1] = gc0305_matrix, 5836 [SENSOR_MT9V111_1] = gc0305_matrix,
@@ -5936,6 +5936,7 @@ static void setquality(struct gspca_dev *gspca_dev)
5936 case SENSOR_ADCM2700: 5936 case SENSOR_ADCM2700:
5937 case SENSOR_GC0305: 5937 case SENSOR_GC0305:
5938 case SENSOR_HV7131B: 5938 case SENSOR_HV7131B:
5939 case SENSOR_HV7131R:
5939 case SENSOR_OV7620: 5940 case SENSOR_OV7620:
5940 case SENSOR_PAS202B: 5941 case SENSOR_PAS202B:
5941 case SENSOR_PO2030: 5942 case SENSOR_PO2030:
@@ -6108,11 +6109,13 @@ static void send_unknown(struct gspca_dev *gspca_dev, int sensor)
6108 reg_w(gspca_dev, 0x02, 0x003b); 6109 reg_w(gspca_dev, 0x02, 0x003b);
6109 reg_w(gspca_dev, 0x00, 0x0038); 6110 reg_w(gspca_dev, 0x00, 0x0038);
6110 break; 6111 break;
6112 case SENSOR_HV7131R:
6111 case SENSOR_PAS202B: 6113 case SENSOR_PAS202B:
6112 reg_w(gspca_dev, 0x03, 0x003b); 6114 reg_w(gspca_dev, 0x03, 0x003b);
6113 reg_w(gspca_dev, 0x0c, 0x003a); 6115 reg_w(gspca_dev, 0x0c, 0x003a);
6114 reg_w(gspca_dev, 0x0b, 0x0039); 6116 reg_w(gspca_dev, 0x0b, 0x0039);
6115 reg_w(gspca_dev, 0x0b, 0x0038); 6117 if (sensor == SENSOR_PAS202B)
6118 reg_w(gspca_dev, 0x0b, 0x0038);
6116 break; 6119 break;
6117 } 6120 }
6118} 6121}
@@ -6704,10 +6707,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
6704 reg_w(gspca_dev, 0x02, 0x003b); 6707 reg_w(gspca_dev, 0x02, 0x003b);
6705 reg_w(gspca_dev, 0x00, 0x0038); 6708 reg_w(gspca_dev, 0x00, 0x0038);
6706 break; 6709 break;
6710 case SENSOR_HV7131R:
6707 case SENSOR_PAS202B: 6711 case SENSOR_PAS202B:
6708 reg_w(gspca_dev, 0x03, 0x003b); 6712 reg_w(gspca_dev, 0x03, 0x003b);
6709 reg_w(gspca_dev, 0x0c, 0x003a); 6713 reg_w(gspca_dev, 0x0c, 0x003a);
6710 reg_w(gspca_dev, 0x0b, 0x0039); 6714 reg_w(gspca_dev, 0x0b, 0x0039);
6715 if (sd->sensor == SENSOR_HV7131R)
6716 reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN);
6711 break; 6717 break;
6712 } 6718 }
6713 6719
@@ -6720,6 +6726,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
6720 break; 6726 break;
6721 case SENSOR_PAS202B: 6727 case SENSOR_PAS202B:
6722 case SENSOR_GC0305: 6728 case SENSOR_GC0305:
6729 case SENSOR_HV7131R:
6723 case SENSOR_TAS5130C: 6730 case SENSOR_TAS5130C:
6724 reg_r(gspca_dev, 0x0008); 6731 reg_r(gspca_dev, 0x0008);
6725 /* fall thru */ 6732 /* fall thru */
@@ -6760,6 +6767,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
6760 /* ms-win + */ 6767 /* ms-win + */
6761 reg_w(gspca_dev, 0x40, 0x0117); 6768 reg_w(gspca_dev, 0x40, 0x0117);
6762 break; 6769 break;
6770 case SENSOR_HV7131R:
6771 i2c_write(gspca_dev, 0x25, 0x04, 0x00); /* exposure */
6772 i2c_write(gspca_dev, 0x26, 0x93, 0x00);
6773 i2c_write(gspca_dev, 0x27, 0xe0, 0x00);
6774 reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
6775 break;
6763 case SENSOR_GC0305: 6776 case SENSOR_GC0305:
6764 case SENSOR_TAS5130C: 6777 case SENSOR_TAS5130C:
6765 reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */ 6778 reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
@@ -6808,9 +6821,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
6808{ 6821{
6809 struct sd *sd = (struct sd *) gspca_dev; 6822 struct sd *sd = (struct sd *) gspca_dev;
6810 6823
6811 if (data[0] == 0xff && data[1] == 0xd8) { /* start of frame */ 6824 /* check the JPEG end of frame */
6825 if (len >= 3
6826 && data[len - 3] == 0xff && data[len - 2] == 0xd9) {
6827/*fixme: what does the last byte mean?*/
6812 gspca_frame_add(gspca_dev, LAST_PACKET, 6828 gspca_frame_add(gspca_dev, LAST_PACKET,
6813 NULL, 0); 6829 data, len - 1);
6830 return;
6831 }
6832
6833 /* check the JPEG start of a frame */
6834 if (data[0] == 0xff && data[1] == 0xd8) {
6814 /* put the JPEG header in the new frame */ 6835 /* put the JPEG header in the new frame */
6815 gspca_frame_add(gspca_dev, FIRST_PACKET, 6836 gspca_frame_add(gspca_dev, FIRST_PACKET,
6816 sd->jpeg_hdr, JPEG_HDR_SZ); 6837 sd->jpeg_hdr, JPEG_HDR_SZ);
@@ -6909,7 +6930,7 @@ static const struct sd_desc sd_desc = {
6909#endif 6930#endif
6910}; 6931};
6911 6932
6912static const __devinitdata struct usb_device_id device_table[] = { 6933static const struct usb_device_id device_table[] = {
6913 {USB_DEVICE(0x041e, 0x041e)}, 6934 {USB_DEVICE(0x041e, 0x041e)},
6914 {USB_DEVICE(0x041e, 0x4017)}, 6935 {USB_DEVICE(0x041e, 0x4017)},
6915 {USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106}, 6936 {USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106},
diff --git a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile
index e0230fcb2e36..3baa9f613ca3 100644
--- a/drivers/media/video/hdpvr/Makefile
+++ b/drivers/media/video/hdpvr/Makefile
@@ -1,6 +1,4 @@
1hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o 1hdpvr-objs := hdpvr-control.o hdpvr-core.o hdpvr-video.o hdpvr-i2c.o
2
3hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o
4 2
5obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o 3obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o
6 4
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index f7d1ee55185a..a27d93b503a5 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -283,6 +283,7 @@ static int hdpvr_probe(struct usb_interface *interface,
283 struct hdpvr_device *dev; 283 struct hdpvr_device *dev;
284 struct usb_host_interface *iface_desc; 284 struct usb_host_interface *iface_desc;
285 struct usb_endpoint_descriptor *endpoint; 285 struct usb_endpoint_descriptor *endpoint;
286 struct i2c_client *client;
286 size_t buffer_size; 287 size_t buffer_size;
287 int i; 288 int i;
288 int retval = -ENOMEM; 289 int retval = -ENOMEM;
@@ -378,25 +379,35 @@ static int hdpvr_probe(struct usb_interface *interface,
378 goto error; 379 goto error;
379 } 380 }
380 381
381#ifdef CONFIG_I2C 382#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
382 /* until i2c is working properly */ 383 retval = hdpvr_register_i2c_adapter(dev);
383 retval = 0; /* hdpvr_register_i2c_adapter(dev); */
384 if (retval < 0) { 384 if (retval < 0) {
385 v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n"); 385 v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
386 goto error; 386 goto error;
387 } 387 }
388 388
389 /* until i2c is working properly */ 389 client = hdpvr_register_ir_rx_i2c(dev);
390 retval = 0; /* hdpvr_register_i2c_ir(dev); */ 390 if (!client) {
391 if (retval < 0) 391 v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
392 v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n"); 392 goto reg_fail;
393#endif /* CONFIG_I2C */ 393 }
394
395 client = hdpvr_register_ir_tx_i2c(dev);
396 if (!client) {
397 v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
398 goto reg_fail;
399 }
400#endif
394 401
395 /* let the user know what node this device is now attached to */ 402 /* let the user know what node this device is now attached to */
396 v4l2_info(&dev->v4l2_dev, "device now attached to %s\n", 403 v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
397 video_device_node_name(dev->video_dev)); 404 video_device_node_name(dev->video_dev));
398 return 0; 405 return 0;
399 406
407reg_fail:
408#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
409 i2c_del_adapter(&dev->i2c_adapter);
410#endif
400error: 411error:
401 if (dev) { 412 if (dev) {
402 /* Destroy single thread */ 413 /* Destroy single thread */
@@ -426,6 +437,9 @@ static void hdpvr_disconnect(struct usb_interface *interface)
426 mutex_lock(&dev->io_mutex); 437 mutex_lock(&dev->io_mutex);
427 hdpvr_cancel_queue(dev); 438 hdpvr_cancel_queue(dev);
428 mutex_unlock(&dev->io_mutex); 439 mutex_unlock(&dev->io_mutex);
440#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
441 i2c_del_adapter(&dev->i2c_adapter);
442#endif
429 video_unregister_device(dev->video_dev); 443 video_unregister_device(dev->video_dev);
430 atomic_dec(&dev_nr); 444 atomic_dec(&dev_nr);
431} 445}
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 24966aa02a70..e53fa55d56a1 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -13,6 +13,8 @@
13 * 13 *
14 */ 14 */
15 15
16#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
17
16#include <linux/i2c.h> 18#include <linux/i2c.h>
17#include <linux/slab.h> 19#include <linux/slab.h>
18 20
@@ -28,106 +30,86 @@
28#define Z8F0811_IR_TX_I2C_ADDR 0x70 30#define Z8F0811_IR_TX_I2C_ADDR 0x70
29#define Z8F0811_IR_RX_I2C_ADDR 0x71 31#define Z8F0811_IR_RX_I2C_ADDR 0x71
30 32
31static const u8 ir_i2c_addrs[] = {
32 Z8F0811_IR_TX_I2C_ADDR,
33 Z8F0811_IR_RX_I2C_ADDR,
34};
35
36static const char * const ir_devicenames[] = {
37 "ir_tx_z8f0811_hdpvr",
38 "ir_rx_z8f0811_hdpvr",
39};
40 33
41static int hdpvr_new_i2c_ir(struct hdpvr_device *dev, struct i2c_adapter *adap, 34struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
42 const char *type, u8 addr)
43{ 35{
44 struct i2c_board_info info;
45 struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data; 36 struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
46 unsigned short addr_list[2] = { addr, I2C_CLIENT_END }; 37 struct i2c_board_info hdpvr_ir_tx_i2c_board_info = {
38 I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
39 };
47 40
48 memset(&info, 0, sizeof(struct i2c_board_info)); 41 init_data->name = "HD-PVR";
49 strlcpy(info.type, type, I2C_NAME_SIZE); 42 hdpvr_ir_tx_i2c_board_info.platform_data = init_data;
50
51 /* Our default information for ir-kbd-i2c.c to use */
52 switch (addr) {
53 case Z8F0811_IR_RX_I2C_ADDR:
54 init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
55 init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
56 init_data->type = RC_TYPE_RC5;
57 init_data->name = "HD PVR";
58 info.platform_data = init_data;
59 break;
60 }
61 43
62 return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ? 44 return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info);
63 -1 : 0;
64} 45}
65 46
66int hdpvr_register_i2c_ir(struct hdpvr_device *dev) 47struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
67{ 48{
68 int i; 49 struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
69 int ret = 0; 50 struct i2c_board_info hdpvr_ir_rx_i2c_board_info = {
51 I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
52 };
70 53
71 for (i = 0; i < ARRAY_SIZE(ir_i2c_addrs); i++) 54 /* Our default information for ir-kbd-i2c.c to use */
72 ret += hdpvr_new_i2c_ir(dev, dev->i2c_adapter, 55 init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
73 ir_devicenames[i], ir_i2c_addrs[i]); 56 init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
57 init_data->type = RC_TYPE_RC5;
58 init_data->name = "HD-PVR";
59 hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
74 60
75 return ret; 61 return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info);
76} 62}
77 63
78static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr, 64static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
79 char *data, int len) 65 unsigned char addr, char *data, int len)
80{ 66{
81 int ret; 67 int ret;
82 char *buf = kmalloc(len, GFP_KERNEL); 68
83 if (!buf) 69 if (len > sizeof(dev->i2c_buf))
84 return -ENOMEM; 70 return -EINVAL;
85 71
86 ret = usb_control_msg(dev->udev, 72 ret = usb_control_msg(dev->udev,
87 usb_rcvctrlpipe(dev->udev, 0), 73 usb_rcvctrlpipe(dev->udev, 0),
88 REQTYPE_I2C_READ, CTRL_READ_REQUEST, 74 REQTYPE_I2C_READ, CTRL_READ_REQUEST,
89 0x100|addr, 0, buf, len, 1000); 75 (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
90 76
91 if (ret == len) { 77 if (ret == len) {
92 memcpy(data, buf, len); 78 memcpy(data, &dev->i2c_buf, len);
93 ret = 0; 79 ret = 0;
94 } else if (ret >= 0) 80 } else if (ret >= 0)
95 ret = -EIO; 81 ret = -EIO;
96 82
97 kfree(buf);
98
99 return ret; 83 return ret;
100} 84}
101 85
102static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr, 86static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus,
103 char *data, int len) 87 unsigned char addr, char *data, int len)
104{ 88{
105 int ret; 89 int ret;
106 char *buf = kmalloc(len, GFP_KERNEL);
107 if (!buf)
108 return -ENOMEM;
109 90
110 memcpy(buf, data, len); 91 if (len > sizeof(dev->i2c_buf))
92 return -EINVAL;
93
94 memcpy(&dev->i2c_buf, data, len);
111 ret = usb_control_msg(dev->udev, 95 ret = usb_control_msg(dev->udev,
112 usb_sndctrlpipe(dev->udev, 0), 96 usb_sndctrlpipe(dev->udev, 0),
113 REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST, 97 REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
114 0x100|addr, 0, buf, len, 1000); 98 (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
115 99
116 if (ret < 0) 100 if (ret < 0)
117 goto error; 101 return ret;
118 102
119 ret = usb_control_msg(dev->udev, 103 ret = usb_control_msg(dev->udev,
120 usb_rcvctrlpipe(dev->udev, 0), 104 usb_rcvctrlpipe(dev->udev, 0),
121 REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST, 105 REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST,
122 0, 0, buf, 2, 1000); 106 0, 0, &dev->i2c_buf, 2, 1000);
123 107
124 if (ret == 2) 108 if ((ret == 2) && (dev->i2c_buf[1] == (len - 1)))
125 ret = 0; 109 ret = 0;
126 else if (ret >= 0) 110 else if (ret >= 0)
127 ret = -EIO; 111 ret = -EIO;
128 112
129error:
130 kfree(buf);
131 return ret; 113 return ret;
132} 114}
133 115
@@ -146,10 +128,10 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
146 addr = msgs[i].addr << 1; 128 addr = msgs[i].addr << 1;
147 129
148 if (msgs[i].flags & I2C_M_RD) 130 if (msgs[i].flags & I2C_M_RD)
149 retval = hdpvr_i2c_read(dev, addr, msgs[i].buf, 131 retval = hdpvr_i2c_read(dev, 1, addr, msgs[i].buf,
150 msgs[i].len); 132 msgs[i].len);
151 else 133 else
152 retval = hdpvr_i2c_write(dev, addr, msgs[i].buf, 134 retval = hdpvr_i2c_write(dev, 1, addr, msgs[i].buf,
153 msgs[i].len); 135 msgs[i].len);
154 } 136 }
155 137
@@ -168,30 +150,47 @@ static struct i2c_algorithm hdpvr_algo = {
168 .functionality = hdpvr_functionality, 150 .functionality = hdpvr_functionality,
169}; 151};
170 152
153static struct i2c_adapter hdpvr_i2c_adapter_template = {
154 .name = "Hauppage HD PVR I2C",
155 .owner = THIS_MODULE,
156 .algo = &hdpvr_algo,
157};
158
159static int hdpvr_activate_ir(struct hdpvr_device *dev)
160{
161 char buffer[8];
162
163 mutex_lock(&dev->i2c_mutex);
164
165 hdpvr_i2c_read(dev, 0, 0x54, buffer, 1);
166
167 buffer[0] = 0;
168 buffer[1] = 0x8;
169 hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
170
171 buffer[1] = 0x18;
172 hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
173
174 mutex_unlock(&dev->i2c_mutex);
175
176 return 0;
177}
178
171int hdpvr_register_i2c_adapter(struct hdpvr_device *dev) 179int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
172{ 180{
173 struct i2c_adapter *i2c_adap;
174 int retval = -ENOMEM; 181 int retval = -ENOMEM;
175 182
176 i2c_adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL); 183 hdpvr_activate_ir(dev);
177 if (i2c_adap == NULL)
178 goto error;
179
180 strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
181 sizeof(i2c_adap->name));
182 i2c_adap->algo = &hdpvr_algo;
183 i2c_adap->owner = THIS_MODULE;
184 i2c_adap->dev.parent = &dev->udev->dev;
185 184
186 i2c_set_adapdata(i2c_adap, dev); 185 memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
186 sizeof(struct i2c_adapter));
187 dev->i2c_adapter.dev.parent = &dev->udev->dev;
187 188
188 retval = i2c_add_adapter(i2c_adap); 189 i2c_set_adapdata(&dev->i2c_adapter, dev);
189 190
190 if (!retval) 191 retval = i2c_add_adapter(&dev->i2c_adapter);
191 dev->i2c_adapter = i2c_adap;
192 else
193 kfree(i2c_adap);
194 192
195error:
196 return retval; 193 return retval;
197} 194}
195
196#endif
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index d38fe1043e47..514aea76eaa5 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -1220,12 +1220,9 @@ static void hdpvr_device_release(struct video_device *vdev)
1220 v4l2_device_unregister(&dev->v4l2_dev); 1220 v4l2_device_unregister(&dev->v4l2_dev);
1221 1221
1222 /* deregister I2C adapter */ 1222 /* deregister I2C adapter */
1223#ifdef CONFIG_I2C 1223#if defined(CONFIG_I2C) || (CONFIG_I2C_MODULE)
1224 mutex_lock(&dev->i2c_mutex); 1224 mutex_lock(&dev->i2c_mutex);
1225 if (dev->i2c_adapter) 1225 i2c_del_adapter(&dev->i2c_adapter);
1226 i2c_del_adapter(dev->i2c_adapter);
1227 kfree(dev->i2c_adapter);
1228 dev->i2c_adapter = NULL;
1229 mutex_unlock(&dev->i2c_mutex); 1226 mutex_unlock(&dev->i2c_mutex);
1230#endif /* CONFIG_I2C */ 1227#endif /* CONFIG_I2C */
1231 1228
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 37f1e4c7675d..072f23c570f3 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -25,6 +25,7 @@
25 KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE) 25 KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE)
26 26
27#define HDPVR_MAX 8 27#define HDPVR_MAX 8
28#define HDPVR_I2C_MAX_SIZE 128
28 29
29/* Define these values to match your devices */ 30/* Define these values to match your devices */
30#define HD_PVR_VENDOR_ID 0x2040 31#define HD_PVR_VENDOR_ID 0x2040
@@ -106,9 +107,11 @@ struct hdpvr_device {
106 struct work_struct worker; 107 struct work_struct worker;
107 108
108 /* I2C adapter */ 109 /* I2C adapter */
109 struct i2c_adapter *i2c_adapter; 110 struct i2c_adapter i2c_adapter;
110 /* I2C lock */ 111 /* I2C lock */
111 struct mutex i2c_mutex; 112 struct mutex i2c_mutex;
113 /* I2C message buffer space */
114 char i2c_buf[HDPVR_I2C_MAX_SIZE];
112 115
113 /* For passing data to ir-kbd-i2c */ 116 /* For passing data to ir-kbd-i2c */
114 struct IR_i2c_init_data ir_i2c_init_data; 117 struct IR_i2c_init_data ir_i2c_init_data;
@@ -310,7 +313,8 @@ int hdpvr_cancel_queue(struct hdpvr_device *dev);
310/* i2c adapter registration */ 313/* i2c adapter registration */
311int hdpvr_register_i2c_adapter(struct hdpvr_device *dev); 314int hdpvr_register_i2c_adapter(struct hdpvr_device *dev);
312 315
313int hdpvr_register_i2c_ir(struct hdpvr_device *dev); 316struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev);
317struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev);
314 318
315/*========================================================================*/ 319/*========================================================================*/
316/* buffer management */ 320/* buffer management */
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index c87b6bc45555..a221ad68b330 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -128,6 +128,19 @@ static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
128 128
129static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) 129static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
130{ 130{
131 int ret;
132 unsigned char buf[1] = { 0 };
133
134 /*
135 * This is the same apparent "are you ready?" poll command observed
136 * watching Windows driver traffic and implemented in lirc_zilog. With
137 * this added, we get far saner remote behavior with z8 chips on usb
138 * connected devices, even with the default polling interval of 100ms.
139 */
140 ret = i2c_master_send(ir->c, buf, 1);
141 if (ret != 1)
142 return (ret < 0) ? ret : -EINVAL;
143
131 return get_key_haup_common (ir, ir_key, ir_raw, 6, 3); 144 return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
132} 145}
133 146
@@ -244,15 +257,17 @@ static void ir_key_poll(struct IR_i2c *ir)
244 static u32 ir_key, ir_raw; 257 static u32 ir_key, ir_raw;
245 int rc; 258 int rc;
246 259
247 dprintk(2,"ir_poll_key\n"); 260 dprintk(3, "%s\n", __func__);
248 rc = ir->get_key(ir, &ir_key, &ir_raw); 261 rc = ir->get_key(ir, &ir_key, &ir_raw);
249 if (rc < 0) { 262 if (rc < 0) {
250 dprintk(2,"error\n"); 263 dprintk(2,"error\n");
251 return; 264 return;
252 } 265 }
253 266
254 if (rc) 267 if (rc) {
268 dprintk(1, "%s: keycode = 0x%04x\n", __func__, ir_key);
255 rc_keydown(ir->rc, ir_key, 0); 269 rc_keydown(ir->rc, ir_key, 0);
270 }
256} 271}
257 272
258static void ir_work(struct work_struct *work) 273static void ir_work(struct work_struct *work)
@@ -321,6 +336,12 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
321 rc_type = RC_TYPE_OTHER; 336 rc_type = RC_TYPE_OTHER;
322 ir_codes = RC_MAP_AVERMEDIA_CARDBUS; 337 ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
323 break; 338 break;
339 case 0x71:
340 name = "Hauppauge/Zilog Z8";
341 ir->get_key = get_key_haup_xvr;
342 rc_type = RC_TYPE_RC5;
343 ir_codes = hauppauge ? RC_MAP_HAUPPAUGE_NEW : RC_MAP_RC5_TV;
344 break;
324 } 345 }
325 346
326 /* Let the caller override settings */ 347 /* Let the caller override settings */
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index e103b8fc7452..9fb86a081c0f 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -300,10 +300,15 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
300 adap, type, 0, I2C_ADDRS(hw_addrs[idx])); 300 adap, type, 0, I2C_ADDRS(hw_addrs[idx]));
301 } else if (hw == IVTV_HW_CX25840) { 301 } else if (hw == IVTV_HW_CX25840) {
302 struct cx25840_platform_data pdata; 302 struct cx25840_platform_data pdata;
303 struct i2c_board_info cx25840_info = {
304 .type = "cx25840",
305 .addr = hw_addrs[idx],
306 .platform_data = &pdata,
307 };
303 308
304 pdata.pvr150_workaround = itv->pvr150_workaround; 309 pdata.pvr150_workaround = itv->pvr150_workaround;
305 sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev, 310 sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap,
306 adap, type, 0, &pdata, hw_addrs[idx], NULL); 311 &cx25840_info, NULL);
307 } else { 312 } else {
308 sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, 313 sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
309 adap, type, hw_addrs[idx], NULL); 314 adap, type, hw_addrs[idx], NULL);
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 209ff97261a9..4904d25f689f 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -12,17 +12,41 @@
12#include <asm/div64.h> 12#include <asm/div64.h>
13#include <media/v4l2-device.h> 13#include <media/v4l2-device.h>
14#include <media/v4l2-chip-ident.h> 14#include <media/v4l2-chip-ident.h>
15#include "mt9v011.h" 15#include <media/mt9v011.h>
16 16
17MODULE_DESCRIPTION("Micron mt9v011 sensor driver"); 17MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
18MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); 18MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
19MODULE_LICENSE("GPL"); 19MODULE_LICENSE("GPL");
20 20
21
22static int debug; 21static int debug;
23module_param(debug, int, 0); 22module_param(debug, int, 0);
24MODULE_PARM_DESC(debug, "Debug level (0-2)"); 23MODULE_PARM_DESC(debug, "Debug level (0-2)");
25 24
25#define R00_MT9V011_CHIP_VERSION 0x00
26#define R01_MT9V011_ROWSTART 0x01
27#define R02_MT9V011_COLSTART 0x02
28#define R03_MT9V011_HEIGHT 0x03
29#define R04_MT9V011_WIDTH 0x04
30#define R05_MT9V011_HBLANK 0x05
31#define R06_MT9V011_VBLANK 0x06
32#define R07_MT9V011_OUT_CTRL 0x07
33#define R09_MT9V011_SHUTTER_WIDTH 0x09
34#define R0A_MT9V011_CLK_SPEED 0x0a
35#define R0B_MT9V011_RESTART 0x0b
36#define R0C_MT9V011_SHUTTER_DELAY 0x0c
37#define R0D_MT9V011_RESET 0x0d
38#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
39#define R20_MT9V011_READ_MODE 0x20
40#define R2B_MT9V011_GREEN_1_GAIN 0x2b
41#define R2C_MT9V011_BLUE_GAIN 0x2c
42#define R2D_MT9V011_RED_GAIN 0x2d
43#define R2E_MT9V011_GREEN_2_GAIN 0x2e
44#define R35_MT9V011_GLOBAL_GAIN 0x35
45#define RF1_MT9V011_CHIP_ENABLE 0xf1
46
47#define MT9V011_VERSION 0x8232
48#define MT9V011_REV_B_VERSION 0x8243
49
26/* supported controls */ 50/* supported controls */
27static struct v4l2_queryctrl mt9v011_qctrl[] = { 51static struct v4l2_queryctrl mt9v011_qctrl[] = {
28 { 52 {
@@ -469,23 +493,6 @@ static int mt9v011_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
469 return 0; 493 return 0;
470} 494}
471 495
472static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data)
473{
474 struct mt9v011 *core = to_mt9v011(sd);
475 unsigned *xtal = data;
476
477 v4l2_dbg(1, debug, sd, "s_config called\n");
478
479 if (xtal) {
480 core->xtal = *xtal;
481 v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
482 *xtal / 1000000, (*xtal / 1000) % 1000);
483 }
484
485 return 0;
486}
487
488
489#ifdef CONFIG_VIDEO_ADV_DEBUG 496#ifdef CONFIG_VIDEO_ADV_DEBUG
490static int mt9v011_g_register(struct v4l2_subdev *sd, 497static int mt9v011_g_register(struct v4l2_subdev *sd,
491 struct v4l2_dbg_register *reg) 498 struct v4l2_dbg_register *reg)
@@ -536,7 +543,6 @@ static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
536 .g_ctrl = mt9v011_g_ctrl, 543 .g_ctrl = mt9v011_g_ctrl,
537 .s_ctrl = mt9v011_s_ctrl, 544 .s_ctrl = mt9v011_s_ctrl,
538 .reset = mt9v011_reset, 545 .reset = mt9v011_reset,
539 .s_config = mt9v011_s_config,
540 .g_chip_ident = mt9v011_g_chip_ident, 546 .g_chip_ident = mt9v011_g_chip_ident,
541#ifdef CONFIG_VIDEO_ADV_DEBUG 547#ifdef CONFIG_VIDEO_ADV_DEBUG
542 .g_register = mt9v011_g_register, 548 .g_register = mt9v011_g_register,
@@ -596,6 +602,14 @@ static int mt9v011_probe(struct i2c_client *c,
596 core->height = 480; 602 core->height = 480;
597 core->xtal = 27000000; /* Hz */ 603 core->xtal = 27000000; /* Hz */
598 604
605 if (c->dev.platform_data) {
606 struct mt9v011_platform_data *pdata = c->dev.platform_data;
607
608 core->xtal = pdata->xtal;
609 v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
610 core->xtal / 1000000, (core->xtal / 1000) % 1000);
611 }
612
599 v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n", 613 v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n",
600 c->addr << 1, c->adapter->name, version); 614 c->addr << 1, c->adapter->name, version);
601 615
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
deleted file mode 100644
index 3350fd6083c3..000000000000
--- a/drivers/media/video/mt9v011.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor
3 *
4 * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
5 * This code is placed under the terms of the GNU General Public License v2
6 */
7
8#ifndef MT9V011_H_
9#define MT9V011_H_
10
11#define R00_MT9V011_CHIP_VERSION 0x00
12#define R01_MT9V011_ROWSTART 0x01
13#define R02_MT9V011_COLSTART 0x02
14#define R03_MT9V011_HEIGHT 0x03
15#define R04_MT9V011_WIDTH 0x04
16#define R05_MT9V011_HBLANK 0x05
17#define R06_MT9V011_VBLANK 0x06
18#define R07_MT9V011_OUT_CTRL 0x07
19#define R09_MT9V011_SHUTTER_WIDTH 0x09
20#define R0A_MT9V011_CLK_SPEED 0x0a
21#define R0B_MT9V011_RESTART 0x0b
22#define R0C_MT9V011_SHUTTER_DELAY 0x0c
23#define R0D_MT9V011_RESET 0x0d
24#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
25#define R20_MT9V011_READ_MODE 0x20
26#define R2B_MT9V011_GREEN_1_GAIN 0x2b
27#define R2C_MT9V011_BLUE_GAIN 0x2c
28#define R2D_MT9V011_RED_GAIN 0x2d
29#define R2E_MT9V011_GREEN_2_GAIN 0x2e
30#define R35_MT9V011_GLOBAL_GAIN 0x35
31#define RF1_MT9V011_CHIP_ENABLE 0xf1
32
33#define MT9V011_VERSION 0x8232
34#define MT9V011_REV_B_VERSION 0x8243
35
36#endif
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index c881a64b41fd..d4e7c11553c3 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -1449,47 +1449,6 @@ static int ov7670_g_chip_ident(struct v4l2_subdev *sd,
1449 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV7670, 0); 1449 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV7670, 0);
1450} 1450}
1451 1451
1452static int ov7670_s_config(struct v4l2_subdev *sd, int dumb, void *data)
1453{
1454 struct i2c_client *client = v4l2_get_subdevdata(sd);
1455 struct ov7670_config *config = data;
1456 struct ov7670_info *info = to_state(sd);
1457 int ret;
1458
1459 info->clock_speed = 30; /* default: a guess */
1460
1461 /*
1462 * Must apply configuration before initializing device, because it
1463 * selects I/O method.
1464 */
1465 if (config) {
1466 info->min_width = config->min_width;
1467 info->min_height = config->min_height;
1468 info->use_smbus = config->use_smbus;
1469
1470 if (config->clock_speed)
1471 info->clock_speed = config->clock_speed;
1472 }
1473
1474 /* Make sure it's an ov7670 */
1475 ret = ov7670_detect(sd);
1476 if (ret) {
1477 v4l_dbg(1, debug, client,
1478 "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
1479 client->addr << 1, client->adapter->name);
1480 kfree(info);
1481 return ret;
1482 }
1483 v4l_info(client, "chip found @ 0x%02x (%s)\n",
1484 client->addr << 1, client->adapter->name);
1485
1486 info->fmt = &ov7670_formats[0];
1487 info->sat = 128; /* Review this */
1488 info->clkrc = info->clock_speed / 30;
1489
1490 return 0;
1491}
1492
1493#ifdef CONFIG_VIDEO_ADV_DEBUG 1452#ifdef CONFIG_VIDEO_ADV_DEBUG
1494static int ov7670_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) 1453static int ov7670_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
1495{ 1454{
@@ -1528,7 +1487,6 @@ static const struct v4l2_subdev_core_ops ov7670_core_ops = {
1528 .s_ctrl = ov7670_s_ctrl, 1487 .s_ctrl = ov7670_s_ctrl,
1529 .queryctrl = ov7670_queryctrl, 1488 .queryctrl = ov7670_queryctrl,
1530 .reset = ov7670_reset, 1489 .reset = ov7670_reset,
1531 .s_config = ov7670_s_config,
1532 .init = ov7670_init, 1490 .init = ov7670_init,
1533#ifdef CONFIG_VIDEO_ADV_DEBUG 1491#ifdef CONFIG_VIDEO_ADV_DEBUG
1534 .g_register = ov7670_g_register, 1492 .g_register = ov7670_g_register,
@@ -1558,6 +1516,7 @@ static int ov7670_probe(struct i2c_client *client,
1558{ 1516{
1559 struct v4l2_subdev *sd; 1517 struct v4l2_subdev *sd;
1560 struct ov7670_info *info; 1518 struct ov7670_info *info;
1519 int ret;
1561 1520
1562 info = kzalloc(sizeof(struct ov7670_info), GFP_KERNEL); 1521 info = kzalloc(sizeof(struct ov7670_info), GFP_KERNEL);
1563 if (info == NULL) 1522 if (info == NULL)
@@ -1565,6 +1524,37 @@ static int ov7670_probe(struct i2c_client *client,
1565 sd = &info->sd; 1524 sd = &info->sd;
1566 v4l2_i2c_subdev_init(sd, client, &ov7670_ops); 1525 v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
1567 1526
1527 info->clock_speed = 30; /* default: a guess */
1528 if (client->dev.platform_data) {
1529 struct ov7670_config *config = client->dev.platform_data;
1530
1531 /*
1532 * Must apply configuration before initializing device, because it
1533 * selects I/O method.
1534 */
1535 info->min_width = config->min_width;
1536 info->min_height = config->min_height;
1537 info->use_smbus = config->use_smbus;
1538
1539 if (config->clock_speed)
1540 info->clock_speed = config->clock_speed;
1541 }
1542
1543 /* Make sure it's an ov7670 */
1544 ret = ov7670_detect(sd);
1545 if (ret) {
1546 v4l_dbg(1, debug, client,
1547 "chip found @ 0x%x (%s) is not an ov7670 chip.\n",
1548 client->addr << 1, client->adapter->name);
1549 kfree(info);
1550 return ret;
1551 }
1552 v4l_info(client, "chip found @ 0x%02x (%s)\n",
1553 client->addr << 1, client->adapter->name);
1554
1555 info->fmt = &ov7670_formats[0];
1556 info->sat = 128; /* Review this */
1557 info->clkrc = info->clock_speed / 30;
1568 return 0; 1558 return 0;
1569} 1559}
1570 1560
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index ac94a8bf883e..305e6aaa844a 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -40,6 +40,7 @@
40#include "pvrusb2-io.h" 40#include "pvrusb2-io.h"
41#include <media/v4l2-device.h> 41#include <media/v4l2-device.h>
42#include <media/cx2341x.h> 42#include <media/cx2341x.h>
43#include <media/ir-kbd-i2c.h>
43#include "pvrusb2-devattr.h" 44#include "pvrusb2-devattr.h"
44 45
45/* Legal values for PVR2_CID_HSM */ 46/* Legal values for PVR2_CID_HSM */
@@ -202,6 +203,7 @@ struct pvr2_hdw {
202 203
203 /* IR related */ 204 /* IR related */
204 unsigned int ir_scheme_active; /* IR scheme as seen from the outside */ 205 unsigned int ir_scheme_active; /* IR scheme as seen from the outside */
206 struct IR_i2c_init_data ir_init_data; /* params passed to IR modules */
205 207
206 /* Frequency table */ 208 /* Frequency table */
207 unsigned int freqTable[FREQTABLE_SIZE]; 209 unsigned int freqTable[FREQTABLE_SIZE];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 7cbe18c4ca95..451ecd485f97 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <media/ir-kbd-i2c.h>
22#include "pvrusb2-i2c-core.h" 23#include "pvrusb2-i2c-core.h"
23#include "pvrusb2-hdw-internal.h" 24#include "pvrusb2-hdw-internal.h"
24#include "pvrusb2-debug.h" 25#include "pvrusb2-debug.h"
@@ -48,13 +49,6 @@ module_param_named(disable_autoload_ir_video, pvr2_disable_ir_video,
48MODULE_PARM_DESC(disable_autoload_ir_video, 49MODULE_PARM_DESC(disable_autoload_ir_video,
49 "1=do not try to autoload ir_video IR receiver"); 50 "1=do not try to autoload ir_video IR receiver");
50 51
51/* Mapping of IR schemes to known I2C addresses - if any */
52static const unsigned char ir_video_addresses[] = {
53 [PVR2_IR_SCHEME_ZILOG] = 0x71,
54 [PVR2_IR_SCHEME_29XXX] = 0x18,
55 [PVR2_IR_SCHEME_24XXX] = 0x18,
56};
57
58static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */ 52static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
59 u8 i2c_addr, /* I2C address we're talking to */ 53 u8 i2c_addr, /* I2C address we're talking to */
60 u8 *data, /* Data to write */ 54 u8 *data, /* Data to write */
@@ -574,26 +568,55 @@ static void do_i2c_scan(struct pvr2_hdw *hdw)
574static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw) 568static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
575{ 569{
576 struct i2c_board_info info; 570 struct i2c_board_info info;
577 unsigned char addr = 0; 571 struct IR_i2c_init_data *init_data = &hdw->ir_init_data;
578 if (pvr2_disable_ir_video) { 572 if (pvr2_disable_ir_video) {
579 pvr2_trace(PVR2_TRACE_INFO, 573 pvr2_trace(PVR2_TRACE_INFO,
580 "Automatic binding of ir_video has been disabled."); 574 "Automatic binding of ir_video has been disabled.");
581 return; 575 return;
582 } 576 }
583 if (hdw->ir_scheme_active < ARRAY_SIZE(ir_video_addresses)) { 577 memset(&info, 0, sizeof(struct i2c_board_info));
584 addr = ir_video_addresses[hdw->ir_scheme_active]; 578 switch (hdw->ir_scheme_active) {
585 } 579 case PVR2_IR_SCHEME_24XXX: /* FX2-controlled IR */
586 if (!addr) { 580 case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
581 init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
582 init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
583 init_data->type = RC_TYPE_RC5;
584 init_data->name = hdw->hdw_desc->description;
585 init_data->polling_interval = 100; /* ms From ir-kbd-i2c */
586 /* IR Receiver */
587 info.addr = 0x18;
588 info.platform_data = init_data;
589 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
590 pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
591 info.type, info.addr);
592 i2c_new_device(&hdw->i2c_adap, &info);
593 break;
594 case PVR2_IR_SCHEME_ZILOG: /* HVR-1950 style */
595 case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
596 init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
597 init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
598 init_data->type = RC_TYPE_RC5;
599 init_data->name = hdw->hdw_desc->description;
600 /* IR Receiver */
601 info.addr = 0x71;
602 info.platform_data = init_data;
603 strlcpy(info.type, "ir_rx_z8f0811_haup", I2C_NAME_SIZE);
604 pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
605 info.type, info.addr);
606 i2c_new_device(&hdw->i2c_adap, &info);
607 /* IR Trasmitter */
608 info.addr = 0x70;
609 info.platform_data = init_data;
610 strlcpy(info.type, "ir_tx_z8f0811_haup", I2C_NAME_SIZE);
611 pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
612 info.type, info.addr);
613 i2c_new_device(&hdw->i2c_adap, &info);
614 break;
615 default:
587 /* The device either doesn't support I2C-based IR or we 616 /* The device either doesn't support I2C-based IR or we
588 don't know (yet) how to operate IR on the device. */ 617 don't know (yet) how to operate IR on the device. */
589 return; 618 break;
590 } 619 }
591 pvr2_trace(PVR2_TRACE_INFO,
592 "Binding ir_video to i2c address 0x%02x.", addr);
593 memset(&info, 0, sizeof(struct i2c_board_info));
594 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
595 info.addr = addr;
596 i2c_new_device(&hdw->i2c_adap, &info);
597} 620}
598 621
599void pvr2_i2c_core_init(struct pvr2_hdw *hdw) 622void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f35459d1f42f..0db90922ee93 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1565,7 +1565,7 @@ static int saa711x_probe(struct i2c_client *client,
1565 chip_id = name[5]; 1565 chip_id = name[5];
1566 1566
1567 /* Check whether this chip is part of the saa711x series */ 1567 /* Check whether this chip is part of the saa711x series */
1568 if (memcmp(name, "1f711", 5)) { 1568 if (memcmp(name + 1, "f711", 4)) {
1569 v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n", 1569 v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
1570 client->addr << 1, name); 1570 client->addr << 1, name);
1571 return -ENODEV; 1571 return -ENODEV;
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index e7aa588c6c5a..deb8fcf4aa49 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5179,18 +5179,8 @@ struct saa7134_board saa7134_boards[] = {
5179 [SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG] = { 5179 [SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG] = {
5180 .name = "Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid", 5180 .name = "Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid",
5181 .audio_clock = 0x00187de7, 5181 .audio_clock = 0x00187de7,
5182#if 0 5182 .tuner_type = TUNER_PHILIPS_TDA8290,
5183 /*
5184 * FIXME: Analog mode doesn't work, if digital is enabled. The proper
5185 * fix is to use tda8290 driver, but Kworld seems to use an
5186 * unsupported version of tda8295.
5187 */
5188 .tuner_type = TUNER_NXP_TDA18271, /* TUNER_PHILIPS_TDA8290 */
5189 .tuner_addr = 0x60,
5190#else
5191 .tuner_type = UNSET,
5192 .tuner_addr = ADDR_UNSET, 5183 .tuner_addr = ADDR_UNSET,
5193#endif
5194 .radio_type = UNSET, 5184 .radio_type = UNSET,
5195 .radio_addr = ADDR_UNSET, 5185 .radio_addr = ADDR_UNSET,
5196 .gpiomask = 0x8e054000, 5186 .gpiomask = 0x8e054000,
@@ -6932,10 +6922,17 @@ static inline int saa7134_kworld_sbtvd_toggle_agc(struct saa7134_dev *dev,
6932 /* toggle AGC switch through GPIO 27 */ 6922 /* toggle AGC switch through GPIO 27 */
6933 switch (mode) { 6923 switch (mode) {
6934 case TDA18271_ANALOG: 6924 case TDA18271_ANALOG:
6935 saa7134_set_gpio(dev, 27, 0); 6925 saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
6926 saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
6927 msleep(20);
6936 break; 6928 break;
6937 case TDA18271_DIGITAL: 6929 case TDA18271_DIGITAL:
6938 saa7134_set_gpio(dev, 27, 1); 6930 saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
6931 saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
6932 msleep(20);
6933 saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
6934 saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
6935 msleep(30);
6939 break; 6936 break;
6940 default: 6937 default:
6941 return -EINVAL; 6938 return -EINVAL;
@@ -6993,6 +6990,7 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev,
6993int saa7134_tuner_callback(void *priv, int component, int command, int arg) 6990int saa7134_tuner_callback(void *priv, int component, int command, int arg)
6994{ 6991{
6995 struct saa7134_dev *dev = priv; 6992 struct saa7134_dev *dev = priv;
6993
6996 if (dev != NULL) { 6994 if (dev != NULL) {
6997 switch (dev->tuner_type) { 6995 switch (dev->tuner_type) {
6998 case TUNER_PHILIPS_TDA8290: 6996 case TUNER_PHILIPS_TDA8290:
@@ -7659,36 +7657,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
7659 break; 7657 break;
7660 } 7658 }
7661 case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG: 7659 case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
7662 {
7663 struct i2c_msg msg = { .addr = 0x4b, .flags = 0 };
7664 int i;
7665 static u8 buffer[][2] = {
7666 {0x30, 0x31},
7667 {0xff, 0x00},
7668 {0x41, 0x03},
7669 {0x41, 0x1a},
7670 {0xff, 0x02},
7671 {0x34, 0x00},
7672 {0x45, 0x97},
7673 {0x45, 0xc1},
7674 };
7675 saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000); 7660 saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
7676 saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000); 7661 saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
7677 7662
7678 /* 7663 saa7134_set_gpio(dev, 27, 0);
7679 * FIXME: identify what device is at addr 0x4b and what means
7680 * this initialization
7681 */
7682 for (i = 0; i < ARRAY_SIZE(buffer); i++) {
7683 msg.buf = &buffer[i][0];
7684 msg.len = ARRAY_SIZE(buffer[0]);
7685 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
7686 printk(KERN_WARNING
7687 "%s: Unable to enable tuner(%i).\n",
7688 dev->name, i);
7689 }
7690 break; 7664 break;
7691 }
7692 } /* switch() */ 7665 } /* switch() */
7693 7666
7694 /* initialize tuner */ 7667 /* initialize tuner */
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 3315a48a848b..f65cad287b83 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -237,12 +237,39 @@ static struct tda18271_std_map mb86a20s_tda18271_std_map = {
237static struct tda18271_config kworld_tda18271_config = { 237static struct tda18271_config kworld_tda18271_config = {
238 .std_map = &mb86a20s_tda18271_std_map, 238 .std_map = &mb86a20s_tda18271_std_map,
239 .gate = TDA18271_GATE_DIGITAL, 239 .gate = TDA18271_GATE_DIGITAL,
240 .config = 3, /* Use tuner callback for AGC */
241
240}; 242};
241 243
242static const struct mb86a20s_config kworld_mb86a20s_config = { 244static const struct mb86a20s_config kworld_mb86a20s_config = {
243 .demod_address = 0x10, 245 .demod_address = 0x10,
244}; 246};
245 247
248static int kworld_sbtvd_gate_ctrl(struct dvb_frontend* fe, int enable)
249{
250 struct saa7134_dev *dev = fe->dvb->priv;
251
252 unsigned char initmsg[] = {0x45, 0x97};
253 unsigned char msg_enable[] = {0x45, 0xc1};
254 unsigned char msg_disable[] = {0x45, 0x81};
255 struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
256
257 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
258 wprintk("could not access the I2C gate\n");
259 return -EIO;
260 }
261 if (enable)
262 msg.buf = msg_enable;
263 else
264 msg.buf = msg_disable;
265 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
266 wprintk("could not access the I2C gate\n");
267 return -EIO;
268 }
269 msleep(20);
270 return 0;
271}
272
246/* ================================================================== 273/* ==================================================================
247 * tda1004x based DVB-T cards, helper functions 274 * tda1004x based DVB-T cards, helper functions
248 */ 275 */
@@ -623,37 +650,6 @@ static struct tda827x_config tda827x_cfg_2_sw42 = {
623 650
624/* ------------------------------------------------------------------ */ 651/* ------------------------------------------------------------------ */
625 652
626static int __kworld_sbtvd_i2c_gate_ctrl(struct saa7134_dev *dev, int enable)
627{
628 unsigned char initmsg[] = {0x45, 0x97};
629 unsigned char msg_enable[] = {0x45, 0xc1};
630 unsigned char msg_disable[] = {0x45, 0x81};
631 struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
632
633 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
634 wprintk("could not access the I2C gate\n");
635 return -EIO;
636 }
637 if (enable)
638 msg.buf = msg_enable;
639 else
640 msg.buf = msg_disable;
641 if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
642 wprintk("could not access the I2C gate\n");
643 return -EIO;
644 }
645 msleep(20);
646 return 0;
647}
648static int kworld_sbtvd_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
649{
650 struct saa7134_dev *dev = fe->dvb->priv;
651
652 return __kworld_sbtvd_i2c_gate_ctrl(dev, enable);
653}
654
655/* ------------------------------------------------------------------ */
656
657static struct tda1004x_config tda827x_lifeview_config = { 653static struct tda1004x_config tda827x_lifeview_config = {
658 .demod_address = 0x08, 654 .demod_address = 0x08,
659 .invert = 1, 655 .invert = 1,
@@ -1660,27 +1656,23 @@ static int dvb_init(struct saa7134_dev *dev)
1660 } 1656 }
1661 break; 1657 break;
1662 case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG: 1658 case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
1663 __kworld_sbtvd_i2c_gate_ctrl(dev, 0); 1659 /* Switch to digital mode */
1664 saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000); 1660 saa7134_tuner_callback(dev, 0,
1665 saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000); 1661 TDA18271_CALLBACK_CMD_AGC_ENABLE, 1);
1666 msleep(20);
1667 saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
1668 saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
1669 msleep(20);
1670 fe0->dvb.frontend = dvb_attach(mb86a20s_attach, 1662 fe0->dvb.frontend = dvb_attach(mb86a20s_attach,
1671 &kworld_mb86a20s_config, 1663 &kworld_mb86a20s_config,
1672 &dev->i2c_adap); 1664 &dev->i2c_adap);
1673 __kworld_sbtvd_i2c_gate_ctrl(dev, 1);
1674 if (fe0->dvb.frontend != NULL) { 1665 if (fe0->dvb.frontend != NULL) {
1666 dvb_attach(tda829x_attach, fe0->dvb.frontend,
1667 &dev->i2c_adap, 0x4b,
1668 &tda829x_no_probe);
1675 dvb_attach(tda18271_attach, fe0->dvb.frontend, 1669 dvb_attach(tda18271_attach, fe0->dvb.frontend,
1676 0x60, &dev->i2c_adap, 1670 0x60, &dev->i2c_adap,
1677 &kworld_tda18271_config); 1671 &kworld_tda18271_config);
1678 /* 1672 fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_gate_ctrl;
1679 * Only after success, it can initialize the gate, otherwise
1680 * an OOPS will hit, due to kfree(fe0->dvb.frontend)
1681 */
1682 fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_i2c_gate_ctrl;
1683 } 1673 }
1674
1675 /* mb86a20s need to use the I2C gateway */
1684 break; 1676 break;
1685 default: 1677 default:
1686 wprintk("Huh? unknown DVB card?\n"); 1678 wprintk("Huh? unknown DVB card?\n");
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index 41064c7b5ef8..b3d2cc729657 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -47,8 +47,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
47 { SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), }, 47 { SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
48 { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), }, 48 { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
49/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */ 49/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
50#endif
51 { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), }, 50 { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
51#endif
52 { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), }, 52 { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
53 { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), }, 53 { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
54#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE 54#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
@@ -56,78 +56,68 @@ static const struct usb_device_id sn9c102_id_table[] = {
56 { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), }, 56 { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
57 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), }, 57 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
58#endif 58#endif
59 { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), }, 59 { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), }, /* not in sonixb */
60#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE 60#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
61 { SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), }, 61 { SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
62/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */ 62/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
63 { SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), }, 63 { SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), },
64#endif 64#endif
65 { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), }, 65 { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), }, /* not in sonixb */
66 /* SN9C103 */ 66 /* SN9C103 */
67 { SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), }, 67/* { SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), }, non existent ? */
68 { SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), }, 68 { SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), }, /* not in sonixb */
69#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
69/* { SN9C102_USB_DEVICE(0x0c45, 0x6083, BRIDGE_SN9C103), }, HY7131D/E */ 70/* { SN9C102_USB_DEVICE(0x0c45, 0x6083, BRIDGE_SN9C103), }, HY7131D/E */
70 { SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), }, 71/* { SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), }, non existent ? */
71 { SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), }, 72/* { SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), }, non existent ? */
72 { SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), }, 73/* { SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), }, non existent ? */
73 { SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), }, 74 { SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
74/* { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */ 75/* { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
75#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
76 { SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), }, 76 { SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
77#endif 77/* { SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), }, non existent ? */
78 { SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), }, 78/* { SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), }, non existent ? */
79 { SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), }, 79/* { SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), }, non existent ? */
80 { SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), },
81/* { SN9C102_USB_DEVICE(0x0c45, 0x60a8, BRIDGE_SN9C103), }, PAS106 */ 80/* { SN9C102_USB_DEVICE(0x0c45, 0x60a8, BRIDGE_SN9C103), }, PAS106 */
82/* { SN9C102_USB_DEVICE(0x0c45, 0x60aa, BRIDGE_SN9C103), }, TAS5130 */ 81/* { SN9C102_USB_DEVICE(0x0c45, 0x60aa, BRIDGE_SN9C103), }, TAS5130 */
83/* { SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5130 */ 82/* { SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5110, non existent */
84 { SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), }, 83/* { SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), }, non existent ? */
85 { SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), }, 84/* { SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), }, non existent ? */
86 { SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), }, 85 { SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
87#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
88 { SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), }, 86 { SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
87/* { SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), }, non existent ? */
88/* { SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), }, non existent ? */
89/* { SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), }, non existent ? */
90/* { SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), }, non existent ? */
91/* { SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), }, non existent ? */
92/* { SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), }, non existent ? */
93/* { SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), }, non existent ? */
89#endif 94#endif
90 { SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
91 { SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), },
92 { SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), },
93 { SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), },
94 { SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), },
95 { SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
96 { SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
97 /* SN9C105 */ 95 /* SN9C105 */
98#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE 96#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
99 { SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), }, 97 { SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
100 { SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), }, 98 { SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
101 { SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), }, 99 { SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
102 { SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), }, 100 { SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), },
103#endif
104 { SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), }, 101 { SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), },
105 { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), }, 102/* { SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), }, PO1030 */
106 { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), }, 103/* { SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), }, OM6801 */
107 { SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), }, 104/* { SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), }, HV7131GP */
108 { SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), }, 105/* { SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), }, non existent ? */
109 { SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), }, 106/* { SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), }, MO4000 */
110 { SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), }, 107/* { SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), }, ICM105C */
111 { SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), }, 108/* { SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), }, OV7648 */
112 { SN9C102_USB_DEVICE(0x0c45, 0x60fb, BRIDGE_SN9C105), }, 109 { SN9C102_USB_DEVICE(0x0c45, 0x60fb, BRIDGE_SN9C105), },
113 { SN9C102_USB_DEVICE(0x0c45, 0x60fc, BRIDGE_SN9C105), }, 110 { SN9C102_USB_DEVICE(0x0c45, 0x60fc, BRIDGE_SN9C105), },
114 { SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), }, 111 { SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), },
115 /* SN9C120 */ 112 /* SN9C120 */
116 { SN9C102_USB_DEVICE(0x0458, 0x7025, BRIDGE_SN9C120), }, 113 { SN9C102_USB_DEVICE(0x0458, 0x7025, BRIDGE_SN9C120), },
117#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE 114/* { SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), }, po2030 */
118 { SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), }, 115/* { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), }, om6801 */
119#endif 116/* { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), }, S5K53BEB */
120 { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
121 { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
122#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
123 { SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), }, 117 { SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
124#endif
125/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */ 118/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
126#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
127 { SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), }, 119 { SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
128#endif
129 { SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), }, 120 { SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
130#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
131 { SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), }, 121 { SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
132 { SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), }, 122 { SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
133#endif 123#endif
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index 864696b7a006..c901721a1db3 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -714,15 +714,6 @@ static int sr030pc30_base_config(struct v4l2_subdev *sd)
714 return ret; 714 return ret;
715} 715}
716 716
717static int sr030pc30_s_config(struct v4l2_subdev *sd,
718 int irq, void *platform_data)
719{
720 struct sr030pc30_info *info = to_sr030pc30(sd);
721
722 info->pdata = platform_data;
723 return 0;
724}
725
726static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable) 717static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
727{ 718{
728 return 0; 719 return 0;
@@ -763,7 +754,6 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
763} 754}
764 755
765static const struct v4l2_subdev_core_ops sr030pc30_core_ops = { 756static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
766 .s_config = sr030pc30_s_config,
767 .s_power = sr030pc30_s_power, 757 .s_power = sr030pc30_s_power,
768 .queryctrl = sr030pc30_queryctrl, 758 .queryctrl = sr030pc30_queryctrl,
769 .s_ctrl = sr030pc30_s_ctrl, 759 .s_ctrl = sr030pc30_s_ctrl,
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
deleted file mode 100644
index 35b6ff5db319..000000000000
--- a/drivers/media/video/tda9875.c
+++ /dev/null
@@ -1,411 +0,0 @@
1/*
2 * For the TDA9875 chip
3 * (The TDA9875 is used on the Diamond DTV2000 french version
4 * Other cards probably use these chips as well.)
5 * This driver will not complain if used with any
6 * other i2c device with the same address.
7 *
8 * Copyright (c) 2000 Guillaume Delvit based on Gerd Knorr source and
9 * Eric Sandeen
10 * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
11 * This code is placed under the terms of the GNU General Public License
12 * Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu)
13 * Which was based on tda8425.c by Greg Alexander (c) 1998
14 *
15 * OPTIONS:
16 * debug - set to 1 if you'd like to see debug messages
17 *
18 * Revision: 0.1 - original version
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/timer.h>
25#include <linux/delay.h>
26#include <linux/errno.h>
27#include <linux/slab.h>
28#include <linux/i2c.h>
29#include <linux/videodev2.h>
30#include <media/v4l2-device.h>
31#include <media/i2c-addr.h>
32
33static int debug; /* insmod parameter */
34module_param(debug, int, S_IRUGO | S_IWUSR);
35MODULE_LICENSE("GPL");
36
37
38/* This is a superset of the TDA9875 */
39struct tda9875 {
40 struct v4l2_subdev sd;
41 int rvol, lvol;
42 int bass, treble;
43};
44
45static inline struct tda9875 *to_state(struct v4l2_subdev *sd)
46{
47 return container_of(sd, struct tda9875, sd);
48}
49
50#define dprintk if (debug) printk
51
52/* The TDA9875 is made by Philips Semiconductor
53 * http://www.semiconductors.philips.com
54 * TDA9875: I2C-bus controlled DSP audio processor, FM demodulator
55 *
56 */
57
58 /* subaddresses for TDA9875 */
59#define TDA9875_MUT 0x12 /*General mute (value --> 0b11001100*/
60#define TDA9875_CFG 0x01 /* Config register (value --> 0b00000000 */
61#define TDA9875_DACOS 0x13 /*DAC i/o select (ADC) 0b0000100*/
62#define TDA9875_LOSR 0x16 /*Line output select regirter 0b0100 0001*/
63
64#define TDA9875_CH1V 0x0c /*Channel 1 volume (mute)*/
65#define TDA9875_CH2V 0x0d /*Channel 2 volume (mute)*/
66#define TDA9875_SC1 0x14 /*SCART 1 in (mono)*/
67#define TDA9875_SC2 0x15 /*SCART 2 in (mono)*/
68
69#define TDA9875_ADCIS 0x17 /*ADC input select (mono) 0b0110 000*/
70#define TDA9875_AER 0x19 /*Audio effect (AVL+Pseudo) 0b0000 0110*/
71#define TDA9875_MCS 0x18 /*Main channel select (DAC) 0b0000100*/
72#define TDA9875_MVL 0x1a /* Main volume gauche */
73#define TDA9875_MVR 0x1b /* Main volume droite */
74#define TDA9875_MBA 0x1d /* Main Basse */
75#define TDA9875_MTR 0x1e /* Main treble */
76#define TDA9875_ACS 0x1f /* Auxilary channel select (FM) 0b0000000*/
77#define TDA9875_AVL 0x20 /* Auxilary volume gauche */
78#define TDA9875_AVR 0x21 /* Auxilary volume droite */
79#define TDA9875_ABA 0x22 /* Auxilary Basse */
80#define TDA9875_ATR 0x23 /* Auxilary treble */
81
82#define TDA9875_MSR 0x02 /* Monitor select register */
83#define TDA9875_C1MSB 0x03 /* Carrier 1 (FM) frequency register MSB */
84#define TDA9875_C1MIB 0x04 /* Carrier 1 (FM) frequency register (16-8]b */
85#define TDA9875_C1LSB 0x05 /* Carrier 1 (FM) frequency register LSB */
86#define TDA9875_C2MSB 0x06 /* Carrier 2 (nicam) frequency register MSB */
87#define TDA9875_C2MIB 0x07 /* Carrier 2 (nicam) frequency register (16-8]b */
88#define TDA9875_C2LSB 0x08 /* Carrier 2 (nicam) frequency register LSB */
89#define TDA9875_DCR 0x09 /* Demodulateur configuration regirter*/
90#define TDA9875_DEEM 0x0a /* FM de-emphasis regirter*/
91#define TDA9875_FMAT 0x0b /* FM Matrix regirter*/
92
93/* values */
94#define TDA9875_MUTE_ON 0xff /* general mute */
95#define TDA9875_MUTE_OFF 0xcc /* general no mute */
96
97
98
99/* Begin code */
100
101static int tda9875_write(struct v4l2_subdev *sd, int subaddr, unsigned char val)
102{
103 struct i2c_client *client = v4l2_get_subdevdata(sd);
104 unsigned char buffer[2];
105
106 v4l2_dbg(1, debug, sd, "Writing %d 0x%x\n", subaddr, val);
107 buffer[0] = subaddr;
108 buffer[1] = val;
109 if (2 != i2c_master_send(client, buffer, 2)) {
110 v4l2_warn(sd, "I/O error, trying (write %d 0x%x)\n",
111 subaddr, val);
112 return -1;
113 }
114 return 0;
115}
116
117
118static int i2c_read_register(struct i2c_client *client, int addr, int reg)
119{
120 unsigned char write[1];
121 unsigned char read[1];
122 struct i2c_msg msgs[2] = {
123 { addr, 0, 1, write },
124 { addr, I2C_M_RD, 1, read }
125 };
126
127 write[0] = reg;
128
129 if (2 != i2c_transfer(client->adapter, msgs, 2)) {
130 v4l_warn(client, "I/O error (read2)\n");
131 return -1;
132 }
133 v4l_dbg(1, debug, client, "chip_read2: reg%d=0x%x\n", reg, read[0]);
134 return read[0];
135}
136
137static void tda9875_set(struct v4l2_subdev *sd)
138{
139 struct tda9875 *tda = to_state(sd);
140 unsigned char a;
141
142 v4l2_dbg(1, debug, sd, "tda9875_set(%04x,%04x,%04x,%04x)\n",
143 tda->lvol, tda->rvol, tda->bass, tda->treble);
144
145 a = tda->lvol & 0xff;
146 tda9875_write(sd, TDA9875_MVL, a);
147 a =tda->rvol & 0xff;
148 tda9875_write(sd, TDA9875_MVR, a);
149 a =tda->bass & 0xff;
150 tda9875_write(sd, TDA9875_MBA, a);
151 a =tda->treble & 0xff;
152 tda9875_write(sd, TDA9875_MTR, a);
153}
154
155static void do_tda9875_init(struct v4l2_subdev *sd)
156{
157 struct tda9875 *t = to_state(sd);
158
159 v4l2_dbg(1, debug, sd, "In tda9875_init\n");
160 tda9875_write(sd, TDA9875_CFG, 0xd0); /*reg de config 0 (reset)*/
161 tda9875_write(sd, TDA9875_MSR, 0x03); /* Monitor 0b00000XXX*/
162 tda9875_write(sd, TDA9875_C1MSB, 0x00); /*Car1(FM) MSB XMHz*/
163 tda9875_write(sd, TDA9875_C1MIB, 0x00); /*Car1(FM) MIB XMHz*/
164 tda9875_write(sd, TDA9875_C1LSB, 0x00); /*Car1(FM) LSB XMHz*/
165 tda9875_write(sd, TDA9875_C2MSB, 0x00); /*Car2(NICAM) MSB XMHz*/
166 tda9875_write(sd, TDA9875_C2MIB, 0x00); /*Car2(NICAM) MIB XMHz*/
167 tda9875_write(sd, TDA9875_C2LSB, 0x00); /*Car2(NICAM) LSB XMHz*/
168 tda9875_write(sd, TDA9875_DCR, 0x00); /*Demod config 0x00*/
169 tda9875_write(sd, TDA9875_DEEM, 0x44); /*DE-Emph 0b0100 0100*/
170 tda9875_write(sd, TDA9875_FMAT, 0x00); /*FM Matrix reg 0x00*/
171 tda9875_write(sd, TDA9875_SC1, 0x00); /* SCART 1 (SC1)*/
172 tda9875_write(sd, TDA9875_SC2, 0x01); /* SCART 2 (sc2)*/
173
174 tda9875_write(sd, TDA9875_CH1V, 0x10); /* Channel volume 1 mute*/
175 tda9875_write(sd, TDA9875_CH2V, 0x10); /* Channel volume 2 mute */
176 tda9875_write(sd, TDA9875_DACOS, 0x02); /* sig DAC i/o(in:nicam)*/
177 tda9875_write(sd, TDA9875_ADCIS, 0x6f); /* sig ADC input(in:mono)*/
178 tda9875_write(sd, TDA9875_LOSR, 0x00); /* line out (in:mono)*/
179 tda9875_write(sd, TDA9875_AER, 0x00); /*06 Effect (AVL+PSEUDO) */
180 tda9875_write(sd, TDA9875_MCS, 0x44); /* Main ch select (DAC) */
181 tda9875_write(sd, TDA9875_MVL, 0x03); /* Vol Main left 10dB */
182 tda9875_write(sd, TDA9875_MVR, 0x03); /* Vol Main right 10dB*/
183 tda9875_write(sd, TDA9875_MBA, 0x00); /* Main Bass Main 0dB*/
184 tda9875_write(sd, TDA9875_MTR, 0x00); /* Main Treble Main 0dB*/
185 tda9875_write(sd, TDA9875_ACS, 0x44); /* Aux chan select (dac)*/
186 tda9875_write(sd, TDA9875_AVL, 0x00); /* Vol Aux left 0dB*/
187 tda9875_write(sd, TDA9875_AVR, 0x00); /* Vol Aux right 0dB*/
188 tda9875_write(sd, TDA9875_ABA, 0x00); /* Aux Bass Main 0dB*/
189 tda9875_write(sd, TDA9875_ATR, 0x00); /* Aux Aigus Main 0dB*/
190
191 tda9875_write(sd, TDA9875_MUT, 0xcc); /* General mute */
192
193 t->lvol = t->rvol = 0; /* 0dB */
194 t->bass = 0; /* 0dB */
195 t->treble = 0; /* 0dB */
196 tda9875_set(sd);
197}
198
199
200static int tda9875_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
201{
202 struct tda9875 *t = to_state(sd);
203
204 switch (ctrl->id) {
205 case V4L2_CID_AUDIO_VOLUME:
206 {
207 int left = (t->lvol+84)*606;
208 int right = (t->rvol+84)*606;
209
210 ctrl->value=max(left,right);
211 return 0;
212 }
213 case V4L2_CID_AUDIO_BALANCE:
214 {
215 int left = (t->lvol+84)*606;
216 int right = (t->rvol+84)*606;
217 int volume = max(left,right);
218 int balance = (32768*min(left,right))/
219 (volume ? volume : 1);
220 ctrl->value=(left<right)?
221 (65535-balance) : balance;
222 return 0;
223 }
224 case V4L2_CID_AUDIO_BASS:
225 ctrl->value = (t->bass+12)*2427; /* min -12 max +15 */
226 return 0;
227 case V4L2_CID_AUDIO_TREBLE:
228 ctrl->value = (t->treble+12)*2730;/* min -12 max +12 */
229 return 0;
230 }
231 return -EINVAL;
232}
233
234static int tda9875_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
235{
236 struct tda9875 *t = to_state(sd);
237 int chvol = 0, volume = 0, balance = 0, left, right;
238
239 switch (ctrl->id) {
240 case V4L2_CID_AUDIO_VOLUME:
241 left = (t->lvol+84)*606;
242 right = (t->rvol+84)*606;
243
244 volume = max(left,right);
245 balance = (32768*min(left,right))/
246 (volume ? volume : 1);
247 balance =(left<right)?
248 (65535-balance) : balance;
249
250 volume = ctrl->value;
251
252 chvol=1;
253 break;
254 case V4L2_CID_AUDIO_BALANCE:
255 left = (t->lvol+84)*606;
256 right = (t->rvol+84)*606;
257
258 volume=max(left,right);
259
260 balance = ctrl->value;
261
262 chvol=1;
263 break;
264 case V4L2_CID_AUDIO_BASS:
265 t->bass = ((ctrl->value/2400)-12) & 0xff;
266 if (t->bass > 15)
267 t->bass = 15;
268 if (t->bass < -12)
269 t->bass = -12 & 0xff;
270 break;
271 case V4L2_CID_AUDIO_TREBLE:
272 t->treble = ((ctrl->value/2700)-12) & 0xff;
273 if (t->treble > 12)
274 t->treble = 12;
275 if (t->treble < -12)
276 t->treble = -12 & 0xff;
277 break;
278 default:
279 return -EINVAL;
280 }
281
282 if (chvol) {
283 left = (min(65536 - balance,32768) *
284 volume) / 32768;
285 right = (min(balance,32768) *
286 volume) / 32768;
287 t->lvol = ((left/606)-84) & 0xff;
288 if (t->lvol > 24)
289 t->lvol = 24;
290 if (t->lvol < -84)
291 t->lvol = -84 & 0xff;
292
293 t->rvol = ((right/606)-84) & 0xff;
294 if (t->rvol > 24)
295 t->rvol = 24;
296 if (t->rvol < -84)
297 t->rvol = -84 & 0xff;
298 }
299
300 tda9875_set(sd);
301 return 0;
302}
303
304static int tda9875_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
305{
306 switch (qc->id) {
307 case V4L2_CID_AUDIO_VOLUME:
308 return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880);
309 case V4L2_CID_AUDIO_BASS:
310 case V4L2_CID_AUDIO_TREBLE:
311 return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
312 }
313 return -EINVAL;
314}
315
316/* ----------------------------------------------------------------------- */
317
318static const struct v4l2_subdev_core_ops tda9875_core_ops = {
319 .queryctrl = tda9875_queryctrl,
320 .g_ctrl = tda9875_g_ctrl,
321 .s_ctrl = tda9875_s_ctrl,
322};
323
324static const struct v4l2_subdev_ops tda9875_ops = {
325 .core = &tda9875_core_ops,
326};
327
328/* ----------------------------------------------------------------------- */
329
330
331/* *********************** *
332 * i2c interface functions *
333 * *********************** */
334
335static int tda9875_checkit(struct i2c_client *client, int addr)
336{
337 int dic, rev;
338
339 dic = i2c_read_register(client, addr, 254);
340 rev = i2c_read_register(client, addr, 255);
341
342 if (dic == 0 || dic == 2) { /* tda9875 and tda9875A */
343 v4l_info(client, "tda9875%s rev. %d detected at 0x%02x\n",
344 dic == 0 ? "" : "A", rev, addr << 1);
345 return 1;
346 }
347 v4l_info(client, "no such chip at 0x%02x (dic=0x%x rev=0x%x)\n",
348 addr << 1, dic, rev);
349 return 0;
350}
351
352static int tda9875_probe(struct i2c_client *client,
353 const struct i2c_device_id *id)
354{
355 struct tda9875 *t;
356 struct v4l2_subdev *sd;
357
358 v4l_info(client, "chip found @ 0x%02x (%s)\n",
359 client->addr << 1, client->adapter->name);
360
361 if (!tda9875_checkit(client, client->addr))
362 return -ENODEV;
363
364 t = kzalloc(sizeof(*t), GFP_KERNEL);
365 if (!t)
366 return -ENOMEM;
367 sd = &t->sd;
368 v4l2_i2c_subdev_init(sd, client, &tda9875_ops);
369
370 do_tda9875_init(sd);
371 return 0;
372}
373
374static int tda9875_remove(struct i2c_client *client)
375{
376 struct v4l2_subdev *sd = i2c_get_clientdata(client);
377
378 do_tda9875_init(sd);
379 v4l2_device_unregister_subdev(sd);
380 kfree(to_state(sd));
381 return 0;
382}
383
384static const struct i2c_device_id tda9875_id[] = {
385 { "tda9875", 0 },
386 { }
387};
388MODULE_DEVICE_TABLE(i2c, tda9875_id);
389
390static struct i2c_driver tda9875_driver = {
391 .driver = {
392 .owner = THIS_MODULE,
393 .name = "tda9875",
394 },
395 .probe = tda9875_probe,
396 .remove = tda9875_remove,
397 .id_table = tda9875_id,
398};
399
400static __init int init_tda9875(void)
401{
402 return i2c_add_driver(&tda9875_driver);
403}
404
405static __exit void exit_tda9875(void)
406{
407 i2c_del_driver(&tda9875_driver);
408}
409
410module_init(init_tda9875);
411module_exit(exit_tda9875);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index a1ffe18640fe..df33a1d188bb 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -512,19 +512,20 @@ int alloc_bulk_urbs_generic(struct urb **urb_array, int num,
512 int buf_size, gfp_t gfp_flags, 512 int buf_size, gfp_t gfp_flags,
513 usb_complete_t complete_fn, void *context) 513 usb_complete_t complete_fn, void *context)
514{ 514{
515 struct urb *urb; 515 int i = 0;
516 void *mem;
517 int i;
518 516
519 for (i = 0; i < num; i++) { 517 for (; i < num; i++) {
520 urb = usb_alloc_urb(0, gfp_flags); 518 void *mem;
519 struct urb *urb = usb_alloc_urb(0, gfp_flags);
521 if (urb == NULL) 520 if (urb == NULL)
522 return i; 521 return i;
523 522
524 mem = usb_alloc_coherent(udev, buf_size, gfp_flags, 523 mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
525 &urb->transfer_dma); 524 &urb->transfer_dma);
526 if (mem == NULL) 525 if (mem == NULL) {
526 usb_free_urb(urb);
527 return i; 527 return i;
528 }
528 529
529 usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr), 530 usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr),
530 mem, buf_size, complete_fn, context); 531 mem, buf_size, complete_fn, context);
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 3f0871b550ad..810eef43c216 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -407,18 +407,6 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
407 /* Decrease the module use count to match the first try_module_get. */ 407 /* Decrease the module use count to match the first try_module_get. */
408 module_put(client->driver->driver.owner); 408 module_put(client->driver->driver.owner);
409 409
410 if (sd) {
411 /* We return errors from v4l2_subdev_call only if we have the
412 callback as the .s_config is not mandatory */
413 int err = v4l2_subdev_call(sd, core, s_config,
414 info->irq, info->platform_data);
415
416 if (err && err != -ENOIOCTLCMD) {
417 v4l2_device_unregister_subdev(sd);
418 sd = NULL;
419 }
420 }
421
422error: 410error:
423 /* If we have a client but no subdev, then something went wrong and 411 /* If we have a client but no subdev, then something went wrong and
424 we must unregister the client. */ 412 we must unregister the client. */
@@ -428,9 +416,8 @@ error:
428} 416}
429EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board); 417EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
430 418
431struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, 419struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
432 struct i2c_adapter *adapter, const char *client_type, 420 struct i2c_adapter *adapter, const char *client_type,
433 int irq, void *platform_data,
434 u8 addr, const unsigned short *probe_addrs) 421 u8 addr, const unsigned short *probe_addrs)
435{ 422{
436 struct i2c_board_info info; 423 struct i2c_board_info info;
@@ -440,12 +427,10 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
440 memset(&info, 0, sizeof(info)); 427 memset(&info, 0, sizeof(info));
441 strlcpy(info.type, client_type, sizeof(info.type)); 428 strlcpy(info.type, client_type, sizeof(info.type));
442 info.addr = addr; 429 info.addr = addr;
443 info.irq = irq;
444 info.platform_data = platform_data;
445 430
446 return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs); 431 return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
447} 432}
448EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg); 433EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
449 434
450/* Return i2c client address of v4l2_subdev. */ 435/* Return i2c client address of v4l2_subdev. */
451unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) 436unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 8f81efcfcf56..ef66d2af0c57 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -569,7 +569,7 @@ static int user_to_new(struct v4l2_ext_control *c,
569 int ret; 569 int ret;
570 u32 size; 570 u32 size;
571 571
572 ctrl->has_new = 1; 572 ctrl->is_new = 1;
573 switch (ctrl->type) { 573 switch (ctrl->type) {
574 case V4L2_CTRL_TYPE_INTEGER64: 574 case V4L2_CTRL_TYPE_INTEGER64:
575 ctrl->val64 = c->value64; 575 ctrl->val64 = c->value64;
@@ -1280,8 +1280,12 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
1280 if (ctrl->done) 1280 if (ctrl->done)
1281 continue; 1281 continue;
1282 1282
1283 for (i = 0; i < master->ncontrols; i++) 1283 for (i = 0; i < master->ncontrols; i++) {
1284 cur_to_new(master->cluster[i]); 1284 if (master->cluster[i]) {
1285 cur_to_new(master->cluster[i]);
1286 master->cluster[i]->is_new = 1;
1287 }
1288 }
1285 1289
1286 /* Skip button controls and read-only controls. */ 1290 /* Skip button controls and read-only controls. */
1287 if (ctrl->type == V4L2_CTRL_TYPE_BUTTON || 1291 if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
@@ -1340,12 +1344,15 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
1340 1344
1341 ctrl = ref->ctrl; 1345 ctrl = ref->ctrl;
1342 memset(qc, 0, sizeof(*qc)); 1346 memset(qc, 0, sizeof(*qc));
1343 qc->id = ctrl->id; 1347 if (id >= V4L2_CID_PRIVATE_BASE)
1348 qc->id = id;
1349 else
1350 qc->id = ctrl->id;
1344 strlcpy(qc->name, ctrl->name, sizeof(qc->name)); 1351 strlcpy(qc->name, ctrl->name, sizeof(qc->name));
1345 qc->minimum = ctrl->minimum; 1352 qc->minimum = ctrl->minimum;
1346 qc->maximum = ctrl->maximum; 1353 qc->maximum = ctrl->maximum;
1347 qc->default_value = ctrl->default_value; 1354 qc->default_value = ctrl->default_value;
1348 if (qc->type == V4L2_CTRL_TYPE_MENU) 1355 if (ctrl->type == V4L2_CTRL_TYPE_MENU)
1349 qc->step = 1; 1356 qc->step = 1;
1350 else 1357 else
1351 qc->step = ctrl->step; 1358 qc->step = ctrl->step;
@@ -1645,7 +1652,7 @@ static int try_or_set_control_cluster(struct v4l2_ctrl *master, bool set)
1645 if (ctrl == NULL) 1652 if (ctrl == NULL)
1646 continue; 1653 continue;
1647 1654
1648 if (ctrl->has_new) { 1655 if (ctrl->is_new) {
1649 /* Double check this: it may have changed since the 1656 /* Double check this: it may have changed since the
1650 last check in try_or_set_ext_ctrls(). */ 1657 last check in try_or_set_ext_ctrls(). */
1651 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) 1658 if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
@@ -1719,13 +1726,13 @@ static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
1719 1726
1720 v4l2_ctrl_lock(ctrl); 1727 v4l2_ctrl_lock(ctrl);
1721 1728
1722 /* Reset the 'has_new' flags of the cluster */ 1729 /* Reset the 'is_new' flags of the cluster */
1723 for (j = 0; j < master->ncontrols; j++) 1730 for (j = 0; j < master->ncontrols; j++)
1724 if (master->cluster[j]) 1731 if (master->cluster[j])
1725 master->cluster[j]->has_new = 0; 1732 master->cluster[j]->is_new = 0;
1726 1733
1727 /* Copy the new caller-supplied control values. 1734 /* Copy the new caller-supplied control values.
1728 user_to_new() sets 'has_new' to 1. */ 1735 user_to_new() sets 'is_new' to 1. */
1729 ret = cluster_walk(i, cs, helpers, user_to_new); 1736 ret = cluster_walk(i, cs, helpers, user_to_new);
1730 1737
1731 if (!ret) 1738 if (!ret)
@@ -1820,15 +1827,18 @@ static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
1820 int ret; 1827 int ret;
1821 int i; 1828 int i;
1822 1829
1830 if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
1831 return -EACCES;
1832
1823 v4l2_ctrl_lock(ctrl); 1833 v4l2_ctrl_lock(ctrl);
1824 1834
1825 /* Reset the 'has_new' flags of the cluster */ 1835 /* Reset the 'is_new' flags of the cluster */
1826 for (i = 0; i < master->ncontrols; i++) 1836 for (i = 0; i < master->ncontrols; i++)
1827 if (master->cluster[i]) 1837 if (master->cluster[i])
1828 master->cluster[i]->has_new = 0; 1838 master->cluster[i]->is_new = 0;
1829 1839
1830 ctrl->val = *val; 1840 ctrl->val = *val;
1831 ctrl->has_new = 1; 1841 ctrl->is_new = 1;
1832 ret = try_or_set_control_cluster(master, false); 1842 ret = try_or_set_control_cluster(master, false);
1833 if (!ret) 1843 if (!ret)
1834 ret = try_or_set_control_cluster(master, true); 1844 ret = try_or_set_control_cluster(master, true);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 359e23290a7e..341764a3a990 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -419,6 +419,10 @@ static int get_index(struct video_device *vdev)
419 * The registration code assigns minor numbers and device node numbers 419 * The registration code assigns minor numbers and device node numbers
420 * based on the requested type and registers the new device node with 420 * based on the requested type and registers the new device node with
421 * the kernel. 421 * the kernel.
422 *
423 * This function assumes that struct video_device was zeroed when it
424 * was allocated and does not contain any stale date.
425 *
422 * An error is returned if no free minor or device node number could be 426 * An error is returned if no free minor or device node number could be
423 * found, or if the registration of the device node failed. 427 * found, or if the registration of the device node failed.
424 * 428 *
@@ -440,7 +444,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
440 int minor_offset = 0; 444 int minor_offset = 0;
441 int minor_cnt = VIDEO_NUM_DEVICES; 445 int minor_cnt = VIDEO_NUM_DEVICES;
442 const char *name_base; 446 const char *name_base;
443 void *priv = vdev->dev.p;
444 447
445 /* A minor value of -1 marks this video device as never 448 /* A minor value of -1 marks this video device as never
446 having been registered */ 449 having been registered */
@@ -559,10 +562,6 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
559 } 562 }
560 563
561 /* Part 4: register the device with sysfs */ 564 /* Part 4: register the device with sysfs */
562 memset(&vdev->dev, 0, sizeof(vdev->dev));
563 /* The memset above cleared the device's device_private, so
564 put back the copy we made earlier. */
565 vdev->dev.p = priv;
566 vdev->dev.class = &video_class; 565 vdev->dev.class = &video_class;
567 vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor); 566 vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
568 if (vdev->parent) 567 if (vdev->parent)
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 7fe6f92af480..ce64fe16bc60 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -100,6 +100,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
100 is a platform bus, then it is never deleted. */ 100 is a platform bus, then it is never deleted. */
101 if (client) 101 if (client)
102 i2c_unregister_device(client); 102 i2c_unregister_device(client);
103 continue;
103 } 104 }
104#endif 105#endif
105#if defined(CONFIG_SPI) 106#if defined(CONFIG_SPI)
@@ -108,6 +109,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
108 109
109 if (spi) 110 if (spi)
110 spi_unregister_device(spi); 111 spi_unregister_device(spi);
112 continue;
111 } 113 }
112#endif 114#endif
113 } 115 }
@@ -126,11 +128,19 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
126 WARN_ON(sd->v4l2_dev != NULL); 128 WARN_ON(sd->v4l2_dev != NULL);
127 if (!try_module_get(sd->owner)) 129 if (!try_module_get(sd->owner))
128 return -ENODEV; 130 return -ENODEV;
131 sd->v4l2_dev = v4l2_dev;
132 if (sd->internal_ops && sd->internal_ops->registered) {
133 err = sd->internal_ops->registered(sd);
134 if (err)
135 return err;
136 }
129 /* This just returns 0 if either of the two args is NULL */ 137 /* This just returns 0 if either of the two args is NULL */
130 err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler); 138 err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
131 if (err) 139 if (err) {
140 if (sd->internal_ops && sd->internal_ops->unregistered)
141 sd->internal_ops->unregistered(sd);
132 return err; 142 return err;
133 sd->v4l2_dev = v4l2_dev; 143 }
134 spin_lock(&v4l2_dev->lock); 144 spin_lock(&v4l2_dev->lock);
135 list_add_tail(&sd->list, &v4l2_dev->subdevs); 145 list_add_tail(&sd->list, &v4l2_dev->subdevs);
136 spin_unlock(&v4l2_dev->lock); 146 spin_unlock(&v4l2_dev->lock);
@@ -146,6 +156,8 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
146 spin_lock(&sd->v4l2_dev->lock); 156 spin_lock(&sd->v4l2_dev->lock);
147 list_del(&sd->list); 157 list_del(&sd->list);
148 spin_unlock(&sd->v4l2_dev->lock); 158 spin_unlock(&sd->v4l2_dev->lock);
159 if (sd->internal_ops && sd->internal_ops->unregistered)
160 sd->internal_ops->unregistered(sd);
149 sd->v4l2_dev = NULL; 161 sd->v4l2_dev = NULL;
150 module_put(sd->owner); 162 module_put(sd->owner);
151} 163}
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 7e47f15f350d..f51327ef6757 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1659,20 +1659,24 @@ static long __video_do_ioctl(struct file *file,
1659 { 1659 {
1660 struct v4l2_dbg_register *p = arg; 1660 struct v4l2_dbg_register *p = arg;
1661 1661
1662 if (!capable(CAP_SYS_ADMIN)) 1662 if (ops->vidioc_g_register) {
1663 ret = -EPERM; 1663 if (!capable(CAP_SYS_ADMIN))
1664 else if (ops->vidioc_g_register) 1664 ret = -EPERM;
1665 ret = ops->vidioc_g_register(file, fh, p); 1665 else
1666 ret = ops->vidioc_g_register(file, fh, p);
1667 }
1666 break; 1668 break;
1667 } 1669 }
1668 case VIDIOC_DBG_S_REGISTER: 1670 case VIDIOC_DBG_S_REGISTER:
1669 { 1671 {
1670 struct v4l2_dbg_register *p = arg; 1672 struct v4l2_dbg_register *p = arg;
1671 1673
1672 if (!capable(CAP_SYS_ADMIN)) 1674 if (ops->vidioc_s_register) {
1673 ret = -EPERM; 1675 if (!capable(CAP_SYS_ADMIN))
1674 else if (ops->vidioc_s_register) 1676 ret = -EPERM;
1675 ret = ops->vidioc_s_register(file, fh, p); 1677 else
1678 ret = ops->vidioc_s_register(file, fh, p);
1679 }
1676 break; 1680 break;
1677 } 1681 }
1678#endif 1682#endif
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 019ee206cbee..fa35639d0c15 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -937,6 +937,7 @@ static void w9966_term(struct w9966 *cam)
937 parport_unregister_device(cam->pdev); 937 parport_unregister_device(cam->pdev);
938 w9966_set_state(cam, W9966_STATE_PDEV, 0); 938 w9966_set_state(cam, W9966_STATE_PDEV, 0);
939 } 939 }
940 memset(cam, 0, sizeof(*cam));
940} 941}
941 942
942 943
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 9cdc3bb15b15..9f2bac519647 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1041,7 +1041,7 @@ zr36057_init (struct zoran *zr)
1041 /* allocate memory *before* doing anything to the hardware 1041 /* allocate memory *before* doing anything to the hardware
1042 * in case allocation fails */ 1042 * in case allocation fails */
1043 zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL); 1043 zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL);
1044 zr->video_dev = kmalloc(sizeof(struct video_device), GFP_KERNEL); 1044 zr->video_dev = video_device_alloc();
1045 if (!zr->stat_com || !zr->video_dev) { 1045 if (!zr->stat_com || !zr->video_dev) {
1046 dprintk(1, 1046 dprintk(1,
1047 KERN_ERR 1047 KERN_ERR
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 20895e7a99c9..793300c554b4 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -361,12 +361,6 @@ static struct pm860x_irq_data pm860x_irqs[] = {
361 }, 361 },
362}; 362};
363 363
364static inline struct pm860x_irq_data *irq_to_pm860x(struct pm860x_chip *chip,
365 int irq)
366{
367 return &pm860x_irqs[irq - chip->irq_base];
368}
369
370static irqreturn_t pm860x_irq(int irq, void *data) 364static irqreturn_t pm860x_irq(int irq, void *data)
371{ 365{
372 struct pm860x_chip *chip = data; 366 struct pm860x_chip *chip = data;
@@ -388,16 +382,16 @@ static irqreturn_t pm860x_irq(int irq, void *data)
388 return IRQ_HANDLED; 382 return IRQ_HANDLED;
389} 383}
390 384
391static void pm860x_irq_lock(unsigned int irq) 385static void pm860x_irq_lock(struct irq_data *data)
392{ 386{
393 struct pm860x_chip *chip = get_irq_chip_data(irq); 387 struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
394 388
395 mutex_lock(&chip->irq_lock); 389 mutex_lock(&chip->irq_lock);
396} 390}
397 391
398static void pm860x_irq_sync_unlock(unsigned int irq) 392static void pm860x_irq_sync_unlock(struct irq_data *data)
399{ 393{
400 struct pm860x_chip *chip = get_irq_chip_data(irq); 394 struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
401 struct pm860x_irq_data *irq_data; 395 struct pm860x_irq_data *irq_data;
402 struct i2c_client *i2c; 396 struct i2c_client *i2c;
403 static unsigned char cached[3] = {0x0, 0x0, 0x0}; 397 static unsigned char cached[3] = {0x0, 0x0, 0x0};
@@ -439,25 +433,25 @@ static void pm860x_irq_sync_unlock(unsigned int irq)
439 mutex_unlock(&chip->irq_lock); 433 mutex_unlock(&chip->irq_lock);
440} 434}
441 435
442static void pm860x_irq_enable(unsigned int irq) 436static void pm860x_irq_enable(struct irq_data *data)
443{ 437{
444 struct pm860x_chip *chip = get_irq_chip_data(irq); 438 struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
445 pm860x_irqs[irq - chip->irq_base].enable 439 pm860x_irqs[data->irq - chip->irq_base].enable
446 = pm860x_irqs[irq - chip->irq_base].offs; 440 = pm860x_irqs[data->irq - chip->irq_base].offs;
447} 441}
448 442
449static void pm860x_irq_disable(unsigned int irq) 443static void pm860x_irq_disable(struct irq_data *data)
450{ 444{
451 struct pm860x_chip *chip = get_irq_chip_data(irq); 445 struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
452 pm860x_irqs[irq - chip->irq_base].enable = 0; 446 pm860x_irqs[data->irq - chip->irq_base].enable = 0;
453} 447}
454 448
455static struct irq_chip pm860x_irq_chip = { 449static struct irq_chip pm860x_irq_chip = {
456 .name = "88pm860x", 450 .name = "88pm860x",
457 .bus_lock = pm860x_irq_lock, 451 .irq_bus_lock = pm860x_irq_lock,
458 .bus_sync_unlock = pm860x_irq_sync_unlock, 452 .irq_bus_sync_unlock = pm860x_irq_sync_unlock,
459 .enable = pm860x_irq_enable, 453 .irq_enable = pm860x_irq_enable,
460 .disable = pm860x_irq_disable, 454 .irq_disable = pm860x_irq_disable,
461}; 455};
462 456
463static int __devinit device_gpadc_init(struct pm860x_chip *chip, 457static int __devinit device_gpadc_init(struct pm860x_chip *chip,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index da9d2971102e..fd018366d670 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -496,13 +496,13 @@ config EZX_PCAP
496 496
497config AB8500_CORE 497config AB8500_CORE
498 bool "ST-Ericsson AB8500 Mixed Signal Power Management chip" 498 bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
499 depends on GENERIC_HARDIRQS && ABX500_CORE && SPI_MASTER && ARCH_U8500 499 depends on GENERIC_HARDIRQS && ABX500_CORE
500 select MFD_CORE 500 select MFD_CORE
501 help 501 help
502 Select this option to enable access to AB8500 power management 502 Select this option to enable access to AB8500 power management
503 chip. This connects to U8500 either on the SSP/SPI bus 503 chip. This connects to U8500 either on the SSP/SPI bus (deprecated
504 or the I2C bus via PRCMU. It also adds the irq_chip 504 since hardware version v1.0) or the I2C bus via PRCMU. It also adds
505 parts for handling the Mixed Signal chip events. 505 the irq_chip parts for handling the Mixed Signal chip events.
506 This chip embeds various other multimedia funtionalities as well. 506 This chip embeds various other multimedia funtionalities as well.
507 507
508config AB8500_I2C_CORE 508config AB8500_I2C_CORE
@@ -537,6 +537,14 @@ config AB3550_CORE
537 LEDs, vibrator, system power and temperature, power management 537 LEDs, vibrator, system power and temperature, power management
538 and ALSA sound. 538 and ALSA sound.
539 539
540config MFD_CS5535
541 tristate "Support for CS5535 and CS5536 southbridge core functions"
542 select MFD_CORE
543 depends on PCI
544 ---help---
545 This is the core driver for CS5535/CS5536 MFD functions. This is
546 necessary for using the board's GPIO and MFGPT functionality.
547
540config MFD_TIMBERDALE 548config MFD_TIMBERDALE
541 tristate "Support for the Timberdale FPGA" 549 tristate "Support for the Timberdale FPGA"
542 select MFD_CORE 550 select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 848e7eac75aa..a54e2c7c6a1c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -70,7 +70,7 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o
70obj-$(CONFIG_AB3100_CORE) += ab3100-core.o 70obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
71obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o 71obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
72obj-$(CONFIG_AB3550_CORE) += ab3550-core.o 72obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
73obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o 73obj-$(CONFIG_AB8500_CORE) += ab8500-core.o
74obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o 74obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
75obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o 75obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
76obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o 76obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
@@ -82,3 +82,4 @@ obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
82obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o 82obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o
83obj-$(CONFIG_MFD_VX855) += vx855.o 83obj-$(CONFIG_MFD_VX855) += vx855.o
84obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o 84obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o
85obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 8a98739e6d9c..5fbca346b998 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1159,15 +1159,16 @@ static void ab3550_mask_work(struct work_struct *work)
1159 } 1159 }
1160} 1160}
1161 1161
1162static void ab3550_mask(unsigned int irq) 1162static void ab3550_mask(struct irq_data *data)
1163{ 1163{
1164 unsigned long flags; 1164 unsigned long flags;
1165 struct ab3550 *ab; 1165 struct ab3550 *ab;
1166 struct ab3550_platform_data *plf_data; 1166 struct ab3550_platform_data *plf_data;
1167 int irq;
1167 1168
1168 ab = get_irq_chip_data(irq); 1169 ab = irq_data_get_irq_chip_data(data);
1169 plf_data = ab->i2c_client[0]->dev.platform_data; 1170 plf_data = ab->i2c_client[0]->dev.platform_data;
1170 irq -= plf_data->irq.base; 1171 irq = data->irq - plf_data->irq.base;
1171 1172
1172 spin_lock_irqsave(&ab->event_lock, flags); 1173 spin_lock_irqsave(&ab->event_lock, flags);
1173 ab->event_mask[irq / 8] |= BIT(irq % 8); 1174 ab->event_mask[irq / 8] |= BIT(irq % 8);
@@ -1176,15 +1177,16 @@ static void ab3550_mask(unsigned int irq)
1176 schedule_work(&ab->mask_work); 1177 schedule_work(&ab->mask_work);
1177} 1178}
1178 1179
1179static void ab3550_unmask(unsigned int irq) 1180static void ab3550_unmask(struct irq_data *data)
1180{ 1181{
1181 unsigned long flags; 1182 unsigned long flags;
1182 struct ab3550 *ab; 1183 struct ab3550 *ab;
1183 struct ab3550_platform_data *plf_data; 1184 struct ab3550_platform_data *plf_data;
1185 int irq;
1184 1186
1185 ab = get_irq_chip_data(irq); 1187 ab = irq_data_get_irq_chip_data(data);
1186 plf_data = ab->i2c_client[0]->dev.platform_data; 1188 plf_data = ab->i2c_client[0]->dev.platform_data;
1187 irq -= plf_data->irq.base; 1189 irq = data->irq - plf_data->irq.base;
1188 1190
1189 spin_lock_irqsave(&ab->event_lock, flags); 1191 spin_lock_irqsave(&ab->event_lock, flags);
1190 ab->event_mask[irq / 8] &= ~BIT(irq % 8); 1192 ab->event_mask[irq / 8] &= ~BIT(irq % 8);
@@ -1193,20 +1195,16 @@ static void ab3550_unmask(unsigned int irq)
1193 schedule_work(&ab->mask_work); 1195 schedule_work(&ab->mask_work);
1194} 1196}
1195 1197
1196static void noop(unsigned int irq) 1198static void noop(struct irq_data *data)
1197{ 1199{
1198} 1200}
1199 1201
1200static struct irq_chip ab3550_irq_chip = { 1202static struct irq_chip ab3550_irq_chip = {
1201 .name = "ab3550-core", /* Keep the same name as the request */ 1203 .name = "ab3550-core", /* Keep the same name as the request */
1202 .startup = NULL, /* defaults to enable */ 1204 .irq_disable = ab3550_mask, /* No default to mask in chip.c */
1203 .shutdown = NULL, /* defaults to disable */ 1205 .irq_ack = noop,
1204 .enable = NULL, /* defaults to unmask */ 1206 .irq_mask = ab3550_mask,
1205 .disable = ab3550_mask, /* No default to mask in chip.c */ 1207 .irq_unmask = ab3550_unmask,
1206 .ack = noop,
1207 .mask = ab3550_mask,
1208 .unmask = ab3550_unmask,
1209 .end = NULL,
1210}; 1208};
1211 1209
1212struct ab_family_id { 1210struct ab_family_id {
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index d9640a623ff4..b6887014d687 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -52,6 +52,7 @@
52#define AB8500_IT_LATCH8_REG 0x27 52#define AB8500_IT_LATCH8_REG 0x27
53#define AB8500_IT_LATCH9_REG 0x28 53#define AB8500_IT_LATCH9_REG 0x28
54#define AB8500_IT_LATCH10_REG 0x29 54#define AB8500_IT_LATCH10_REG 0x29
55#define AB8500_IT_LATCH12_REG 0x2B
55#define AB8500_IT_LATCH19_REG 0x32 56#define AB8500_IT_LATCH19_REG 0x32
56#define AB8500_IT_LATCH20_REG 0x33 57#define AB8500_IT_LATCH20_REG 0x33
57#define AB8500_IT_LATCH21_REG 0x34 58#define AB8500_IT_LATCH21_REG 0x34
@@ -98,13 +99,17 @@
98 * offset 0. 99 * offset 0.
99 */ 100 */
100static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = { 101static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = {
101 0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21, 102 0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 18, 19, 20, 21,
102}; 103};
103 104
104static int ab8500_get_chip_id(struct device *dev) 105static int ab8500_get_chip_id(struct device *dev)
105{ 106{
106 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); 107 struct ab8500 *ab8500;
107 return (int)ab8500->chip_id; 108
109 if (!dev)
110 return -EINVAL;
111 ab8500 = dev_get_drvdata(dev->parent);
112 return ab8500 ? (int)ab8500->chip_id : -EINVAL;
108} 113}
109 114
110static int set_register_interruptible(struct ab8500 *ab8500, u8 bank, 115static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -228,16 +233,16 @@ static struct abx500_ops ab8500_ops = {
228 .startup_irq_enabled = NULL, 233 .startup_irq_enabled = NULL,
229}; 234};
230 235
231static void ab8500_irq_lock(unsigned int irq) 236static void ab8500_irq_lock(struct irq_data *data)
232{ 237{
233 struct ab8500 *ab8500 = get_irq_chip_data(irq); 238 struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
234 239
235 mutex_lock(&ab8500->irq_lock); 240 mutex_lock(&ab8500->irq_lock);
236} 241}
237 242
238static void ab8500_irq_sync_unlock(unsigned int irq) 243static void ab8500_irq_sync_unlock(struct irq_data *data)
239{ 244{
240 struct ab8500 *ab8500 = get_irq_chip_data(irq); 245 struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
241 int i; 246 int i;
242 247
243 for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { 248 for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
@@ -248,6 +253,10 @@ static void ab8500_irq_sync_unlock(unsigned int irq)
248 if (new == old) 253 if (new == old)
249 continue; 254 continue;
250 255
256 /* Interrupt register 12 does'nt exist prior to version 0x20 */
257 if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
258 continue;
259
251 ab8500->oldmask[i] = new; 260 ab8500->oldmask[i] = new;
252 261
253 reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i]; 262 reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i];
@@ -257,20 +266,20 @@ static void ab8500_irq_sync_unlock(unsigned int irq)
257 mutex_unlock(&ab8500->irq_lock); 266 mutex_unlock(&ab8500->irq_lock);
258} 267}
259 268
260static void ab8500_irq_mask(unsigned int irq) 269static void ab8500_irq_mask(struct irq_data *data)
261{ 270{
262 struct ab8500 *ab8500 = get_irq_chip_data(irq); 271 struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
263 int offset = irq - ab8500->irq_base; 272 int offset = data->irq - ab8500->irq_base;
264 int index = offset / 8; 273 int index = offset / 8;
265 int mask = 1 << (offset % 8); 274 int mask = 1 << (offset % 8);
266 275
267 ab8500->mask[index] |= mask; 276 ab8500->mask[index] |= mask;
268} 277}
269 278
270static void ab8500_irq_unmask(unsigned int irq) 279static void ab8500_irq_unmask(struct irq_data *data)
271{ 280{
272 struct ab8500 *ab8500 = get_irq_chip_data(irq); 281 struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
273 int offset = irq - ab8500->irq_base; 282 int offset = data->irq - ab8500->irq_base;
274 int index = offset / 8; 283 int index = offset / 8;
275 int mask = 1 << (offset % 8); 284 int mask = 1 << (offset % 8);
276 285
@@ -279,10 +288,10 @@ static void ab8500_irq_unmask(unsigned int irq)
279 288
280static struct irq_chip ab8500_irq_chip = { 289static struct irq_chip ab8500_irq_chip = {
281 .name = "ab8500", 290 .name = "ab8500",
282 .bus_lock = ab8500_irq_lock, 291 .irq_bus_lock = ab8500_irq_lock,
283 .bus_sync_unlock = ab8500_irq_sync_unlock, 292 .irq_bus_sync_unlock = ab8500_irq_sync_unlock,
284 .mask = ab8500_irq_mask, 293 .irq_mask = ab8500_irq_mask,
285 .unmask = ab8500_irq_unmask, 294 .irq_unmask = ab8500_irq_unmask,
286}; 295};
287 296
288static irqreturn_t ab8500_irq(int irq, void *dev) 297static irqreturn_t ab8500_irq(int irq, void *dev)
@@ -297,6 +306,10 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
297 int status; 306 int status;
298 u8 value; 307 u8 value;
299 308
309 /* Interrupt register 12 does'nt exist prior to version 0x20 */
310 if (regoffset == 11 && ab8500->chip_id < 0x20)
311 continue;
312
300 status = get_register_interruptible(ab8500, AB8500_INTERRUPT, 313 status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
301 AB8500_IT_LATCH1_REG + regoffset, &value); 314 AB8500_IT_LATCH1_REG + regoffset, &value);
302 if (status < 0 || value == 0) 315 if (status < 0 || value == 0)
@@ -393,13 +406,195 @@ static struct resource ab8500_poweronkey_db_resources[] = {
393 }, 406 },
394}; 407};
395 408
409static struct resource ab8500_bm_resources[] = {
410 {
411 .name = "MAIN_EXT_CH_NOT_OK",
412 .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
413 .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
414 .flags = IORESOURCE_IRQ,
415 },
416 {
417 .name = "BATT_OVV",
418 .start = AB8500_INT_BATT_OVV,
419 .end = AB8500_INT_BATT_OVV,
420 .flags = IORESOURCE_IRQ,
421 },
422 {
423 .name = "MAIN_CH_UNPLUG_DET",
424 .start = AB8500_INT_MAIN_CH_UNPLUG_DET,
425 .end = AB8500_INT_MAIN_CH_UNPLUG_DET,
426 .flags = IORESOURCE_IRQ,
427 },
428 {
429 .name = "MAIN_CHARGE_PLUG_DET",
430 .start = AB8500_INT_MAIN_CH_PLUG_DET,
431 .end = AB8500_INT_MAIN_CH_PLUG_DET,
432 .flags = IORESOURCE_IRQ,
433 },
434 {
435 .name = "VBUS_DET_F",
436 .start = AB8500_INT_VBUS_DET_F,
437 .end = AB8500_INT_VBUS_DET_F,
438 .flags = IORESOURCE_IRQ,
439 },
440 {
441 .name = "VBUS_DET_R",
442 .start = AB8500_INT_VBUS_DET_R,
443 .end = AB8500_INT_VBUS_DET_R,
444 .flags = IORESOURCE_IRQ,
445 },
446 {
447 .name = "BAT_CTRL_INDB",
448 .start = AB8500_INT_BAT_CTRL_INDB,
449 .end = AB8500_INT_BAT_CTRL_INDB,
450 .flags = IORESOURCE_IRQ,
451 },
452 {
453 .name = "CH_WD_EXP",
454 .start = AB8500_INT_CH_WD_EXP,
455 .end = AB8500_INT_CH_WD_EXP,
456 .flags = IORESOURCE_IRQ,
457 },
458 {
459 .name = "VBUS_OVV",
460 .start = AB8500_INT_VBUS_OVV,
461 .end = AB8500_INT_VBUS_OVV,
462 .flags = IORESOURCE_IRQ,
463 },
464 {
465 .name = "NCONV_ACCU",
466 .start = AB8500_INT_CCN_CONV_ACC,
467 .end = AB8500_INT_CCN_CONV_ACC,
468 .flags = IORESOURCE_IRQ,
469 },
470 {
471 .name = "LOW_BAT_F",
472 .start = AB8500_INT_LOW_BAT_F,
473 .end = AB8500_INT_LOW_BAT_F,
474 .flags = IORESOURCE_IRQ,
475 },
476 {
477 .name = "LOW_BAT_R",
478 .start = AB8500_INT_LOW_BAT_R,
479 .end = AB8500_INT_LOW_BAT_R,
480 .flags = IORESOURCE_IRQ,
481 },
482 {
483 .name = "BTEMP_LOW",
484 .start = AB8500_INT_BTEMP_LOW,
485 .end = AB8500_INT_BTEMP_LOW,
486 .flags = IORESOURCE_IRQ,
487 },
488 {
489 .name = "BTEMP_HIGH",
490 .start = AB8500_INT_BTEMP_HIGH,
491 .end = AB8500_INT_BTEMP_HIGH,
492 .flags = IORESOURCE_IRQ,
493 },
494 {
495 .name = "USB_CHARGER_NOT_OKR",
496 .start = AB8500_INT_USB_CHARGER_NOT_OK,
497 .end = AB8500_INT_USB_CHARGER_NOT_OK,
498 .flags = IORESOURCE_IRQ,
499 },
500 {
501 .name = "USB_CHARGE_DET_DONE",
502 .start = AB8500_INT_USB_CHG_DET_DONE,
503 .end = AB8500_INT_USB_CHG_DET_DONE,
504 .flags = IORESOURCE_IRQ,
505 },
506 {
507 .name = "USB_CH_TH_PROT_R",
508 .start = AB8500_INT_USB_CH_TH_PROT_R,
509 .end = AB8500_INT_USB_CH_TH_PROT_R,
510 .flags = IORESOURCE_IRQ,
511 },
512 {
513 .name = "MAIN_CH_TH_PROT_R",
514 .start = AB8500_INT_MAIN_CH_TH_PROT_R,
515 .end = AB8500_INT_MAIN_CH_TH_PROT_R,
516 .flags = IORESOURCE_IRQ,
517 },
518 {
519 .name = "USB_CHARGER_NOT_OKF",
520 .start = AB8500_INT_USB_CHARGER_NOT_OKF,
521 .end = AB8500_INT_USB_CHARGER_NOT_OKF,
522 .flags = IORESOURCE_IRQ,
523 },
524};
525
526static struct resource ab8500_debug_resources[] = {
527 {
528 .name = "IRQ_FIRST",
529 .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
530 .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
531 .flags = IORESOURCE_IRQ,
532 },
533 {
534 .name = "IRQ_LAST",
535 .start = AB8500_INT_USB_CHARGER_NOT_OKF,
536 .end = AB8500_INT_USB_CHARGER_NOT_OKF,
537 .flags = IORESOURCE_IRQ,
538 },
539};
540
541static struct resource ab8500_usb_resources[] = {
542 {
543 .name = "ID_WAKEUP_R",
544 .start = AB8500_INT_ID_WAKEUP_R,
545 .end = AB8500_INT_ID_WAKEUP_R,
546 .flags = IORESOURCE_IRQ,
547 },
548 {
549 .name = "ID_WAKEUP_F",
550 .start = AB8500_INT_ID_WAKEUP_F,
551 .end = AB8500_INT_ID_WAKEUP_F,
552 .flags = IORESOURCE_IRQ,
553 },
554 {
555 .name = "VBUS_DET_F",
556 .start = AB8500_INT_VBUS_DET_F,
557 .end = AB8500_INT_VBUS_DET_F,
558 .flags = IORESOURCE_IRQ,
559 },
560 {
561 .name = "VBUS_DET_R",
562 .start = AB8500_INT_VBUS_DET_R,
563 .end = AB8500_INT_VBUS_DET_R,
564 .flags = IORESOURCE_IRQ,
565 },
566 {
567 .name = "USB_LINK_STATUS",
568 .start = AB8500_INT_USB_LINK_STATUS,
569 .end = AB8500_INT_USB_LINK_STATUS,
570 .flags = IORESOURCE_IRQ,
571 },
572};
573
574static struct resource ab8500_temp_resources[] = {
575 {
576 .name = "AB8500_TEMP_WARM",
577 .start = AB8500_INT_TEMP_WARM,
578 .end = AB8500_INT_TEMP_WARM,
579 .flags = IORESOURCE_IRQ,
580 },
581};
582
396static struct mfd_cell ab8500_devs[] = { 583static struct mfd_cell ab8500_devs[] = {
397#ifdef CONFIG_DEBUG_FS 584#ifdef CONFIG_DEBUG_FS
398 { 585 {
399 .name = "ab8500-debug", 586 .name = "ab8500-debug",
587 .num_resources = ARRAY_SIZE(ab8500_debug_resources),
588 .resources = ab8500_debug_resources,
400 }, 589 },
401#endif 590#endif
402 { 591 {
592 .name = "ab8500-sysctrl",
593 },
594 {
595 .name = "ab8500-regulator",
596 },
597 {
403 .name = "ab8500-gpadc", 598 .name = "ab8500-gpadc",
404 .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), 599 .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
405 .resources = ab8500_gpadc_resources, 600 .resources = ab8500_gpadc_resources,
@@ -410,6 +605,22 @@ static struct mfd_cell ab8500_devs[] = {
410 .resources = ab8500_rtc_resources, 605 .resources = ab8500_rtc_resources,
411 }, 606 },
412 { 607 {
608 .name = "ab8500-bm",
609 .num_resources = ARRAY_SIZE(ab8500_bm_resources),
610 .resources = ab8500_bm_resources,
611 },
612 { .name = "ab8500-codec", },
613 {
614 .name = "ab8500-usb",
615 .num_resources = ARRAY_SIZE(ab8500_usb_resources),
616 .resources = ab8500_usb_resources,
617 },
618 {
619 .name = "ab8500-poweron-key",
620 .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
621 .resources = ab8500_poweronkey_db_resources,
622 },
623 {
413 .name = "ab8500-pwm", 624 .name = "ab8500-pwm",
414 .id = 1, 625 .id = 1,
415 }, 626 },
@@ -421,17 +632,37 @@ static struct mfd_cell ab8500_devs[] = {
421 .name = "ab8500-pwm", 632 .name = "ab8500-pwm",
422 .id = 3, 633 .id = 3,
423 }, 634 },
424 { .name = "ab8500-charger", }, 635 { .name = "ab8500-leds", },
425 { .name = "ab8500-audio", },
426 { .name = "ab8500-usb", },
427 { .name = "ab8500-regulator", },
428 { 636 {
429 .name = "ab8500-poweron-key", 637 .name = "ab8500-denc",
430 .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources), 638 },
431 .resources = ab8500_poweronkey_db_resources, 639 {
640 .name = "ab8500-temp",
641 .num_resources = ARRAY_SIZE(ab8500_temp_resources),
642 .resources = ab8500_temp_resources,
432 }, 643 },
433}; 644};
434 645
646static ssize_t show_chip_id(struct device *dev,
647 struct device_attribute *attr, char *buf)
648{
649 struct ab8500 *ab8500;
650
651 ab8500 = dev_get_drvdata(dev);
652 return sprintf(buf, "%#x\n", ab8500 ? ab8500->chip_id : -EINVAL);
653}
654
655static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
656
657static struct attribute *ab8500_sysfs_entries[] = {
658 &dev_attr_chip_id.attr,
659 NULL,
660};
661
662static struct attribute_group ab8500_attr_group = {
663 .attrs = ab8500_sysfs_entries,
664};
665
435int __devinit ab8500_init(struct ab8500 *ab8500) 666int __devinit ab8500_init(struct ab8500 *ab8500)
436{ 667{
437 struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev); 668 struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
@@ -454,8 +685,9 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
454 * 0x0 - Early Drop 685 * 0x0 - Early Drop
455 * 0x10 - Cut 1.0 686 * 0x10 - Cut 1.0
456 * 0x11 - Cut 1.1 687 * 0x11 - Cut 1.1
688 * 0x20 - Cut 2.0
457 */ 689 */
458 if (value == 0x0 || value == 0x10 || value == 0x11) { 690 if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20) {
459 ab8500->revision = value; 691 ab8500->revision = value;
460 dev_info(ab8500->dev, "detected chip, revision: %#x\n", value); 692 dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
461 } else { 693 } else {
@@ -468,18 +700,16 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
468 plat->init(ab8500); 700 plat->init(ab8500);
469 701
470 /* Clear and mask all interrupts */ 702 /* Clear and mask all interrupts */
471 for (i = 0; i < 10; i++) { 703 for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
472 get_register_interruptible(ab8500, AB8500_INTERRUPT, 704 /* Interrupt register 12 does'nt exist prior to version 0x20 */
473 AB8500_IT_LATCH1_REG + i, &value); 705 if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
474 set_register_interruptible(ab8500, AB8500_INTERRUPT, 706 continue;
475 AB8500_IT_MASK1_REG + i, 0xff);
476 }
477 707
478 for (i = 18; i < 24; i++) {
479 get_register_interruptible(ab8500, AB8500_INTERRUPT, 708 get_register_interruptible(ab8500, AB8500_INTERRUPT,
480 AB8500_IT_LATCH1_REG + i, &value); 709 AB8500_IT_LATCH1_REG + ab8500_irq_regoffset[i],
710 &value);
481 set_register_interruptible(ab8500, AB8500_INTERRUPT, 711 set_register_interruptible(ab8500, AB8500_INTERRUPT,
482 AB8500_IT_MASK1_REG + i, 0xff); 712 AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i], 0xff);
483 } 713 }
484 714
485 ret = abx500_register_ops(ab8500->dev, &ab8500_ops); 715 ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
@@ -495,7 +725,8 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
495 return ret; 725 return ret;
496 726
497 ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq, 727 ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
498 IRQF_ONESHOT, "ab8500", ab8500); 728 IRQF_ONESHOT | IRQF_NO_SUSPEND,
729 "ab8500", ab8500);
499 if (ret) 730 if (ret)
500 goto out_removeirq; 731 goto out_removeirq;
501 } 732 }
@@ -506,6 +737,10 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
506 if (ret) 737 if (ret)
507 goto out_freeirq; 738 goto out_freeirq;
508 739
740 ret = sysfs_create_group(&ab8500->dev->kobj, &ab8500_attr_group);
741 if (ret)
742 dev_err(ab8500->dev, "error creating sysfs entries\n");
743
509 return ret; 744 return ret;
510 745
511out_freeirq: 746out_freeirq:
@@ -519,6 +754,7 @@ out_removeirq:
519 754
520int __devexit ab8500_exit(struct ab8500 *ab8500) 755int __devexit ab8500_exit(struct ab8500 *ab8500)
521{ 756{
757 sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
522 mfd_remove_devices(ab8500->dev); 758 mfd_remove_devices(ab8500->dev);
523 if (ab8500->irq_base) { 759 if (ab8500->irq_base) {
524 free_irq(ab8500->irq, ab8500); 760 free_irq(ab8500->irq, ab8500);
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 8d1e05a39815..3c1541ae7223 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -24,9 +24,9 @@ static u32 debug_address;
24 * @perm: access permissions for the range 24 * @perm: access permissions for the range
25 */ 25 */
26struct ab8500_reg_range { 26struct ab8500_reg_range {
27 u8 first; 27 u8 first;
28 u8 last; 28 u8 last;
29 u8 perm; 29 u8 perm;
30}; 30};
31 31
32/** 32/**
@@ -36,9 +36,9 @@ struct ab8500_reg_range {
36 * @range: the list of register ranges 36 * @range: the list of register ranges
37 */ 37 */
38struct ab8500_i2c_ranges { 38struct ab8500_i2c_ranges {
39 u8 num_ranges; 39 u8 num_ranges;
40 u8 bankid; 40 u8 bankid;
41 const struct ab8500_reg_range *range; 41 const struct ab8500_reg_range *range;
42}; 42};
43 43
44#define AB8500_NAME_STRING "ab8500" 44#define AB8500_NAME_STRING "ab8500"
@@ -47,521 +47,521 @@ struct ab8500_i2c_ranges {
47#define AB8500_REV_REG 0x80 47#define AB8500_REV_REG 0x80
48 48
49static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = { 49static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
50 [0x0] = { 50 [0x0] = {
51 .num_ranges = 0, 51 .num_ranges = 0,
52 .range = 0, 52 .range = 0,
53 }, 53 },
54 [AB8500_SYS_CTRL1_BLOCK] = { 54 [AB8500_SYS_CTRL1_BLOCK] = {
55 .num_ranges = 3, 55 .num_ranges = 3,
56 .range = (struct ab8500_reg_range[]) { 56 .range = (struct ab8500_reg_range[]) {
57 { 57 {
58 .first = 0x00, 58 .first = 0x00,
59 .last = 0x02, 59 .last = 0x02,
60 }, 60 },
61 { 61 {
62 .first = 0x42, 62 .first = 0x42,
63 .last = 0x42, 63 .last = 0x42,
64 }, 64 },
65 { 65 {
66 .first = 0x80, 66 .first = 0x80,
67 .last = 0x81, 67 .last = 0x81,
68 }, 68 },
69 }, 69 },
70 }, 70 },
71 [AB8500_SYS_CTRL2_BLOCK] = { 71 [AB8500_SYS_CTRL2_BLOCK] = {
72 .num_ranges = 4, 72 .num_ranges = 4,
73 .range = (struct ab8500_reg_range[]) { 73 .range = (struct ab8500_reg_range[]) {
74 { 74 {
75 .first = 0x00, 75 .first = 0x00,
76 .last = 0x0D, 76 .last = 0x0D,
77 }, 77 },
78 { 78 {
79 .first = 0x0F, 79 .first = 0x0F,
80 .last = 0x17, 80 .last = 0x17,
81 }, 81 },
82 { 82 {
83 .first = 0x30, 83 .first = 0x30,
84 .last = 0x30, 84 .last = 0x30,
85 }, 85 },
86 { 86 {
87 .first = 0x32, 87 .first = 0x32,
88 .last = 0x33, 88 .last = 0x33,
89 }, 89 },
90 }, 90 },
91 }, 91 },
92 [AB8500_REGU_CTRL1] = { 92 [AB8500_REGU_CTRL1] = {
93 .num_ranges = 3, 93 .num_ranges = 3,
94 .range = (struct ab8500_reg_range[]) { 94 .range = (struct ab8500_reg_range[]) {
95 { 95 {
96 .first = 0x00, 96 .first = 0x00,
97 .last = 0x00, 97 .last = 0x00,
98 }, 98 },
99 { 99 {
100 .first = 0x03, 100 .first = 0x03,
101 .last = 0x10, 101 .last = 0x10,
102 }, 102 },
103 { 103 {
104 .first = 0x80, 104 .first = 0x80,
105 .last = 0x84, 105 .last = 0x84,
106 }, 106 },
107 }, 107 },
108 }, 108 },
109 [AB8500_REGU_CTRL2] = { 109 [AB8500_REGU_CTRL2] = {
110 .num_ranges = 5, 110 .num_ranges = 5,
111 .range = (struct ab8500_reg_range[]) { 111 .range = (struct ab8500_reg_range[]) {
112 { 112 {
113 .first = 0x00, 113 .first = 0x00,
114 .last = 0x15, 114 .last = 0x15,
115 }, 115 },
116 { 116 {
117 .first = 0x17, 117 .first = 0x17,
118 .last = 0x19, 118 .last = 0x19,
119 }, 119 },
120 { 120 {
121 .first = 0x1B, 121 .first = 0x1B,
122 .last = 0x1D, 122 .last = 0x1D,
123 }, 123 },
124 { 124 {
125 .first = 0x1F, 125 .first = 0x1F,
126 .last = 0x22, 126 .last = 0x22,
127 }, 127 },
128 { 128 {
129 .first = 0x40, 129 .first = 0x40,
130 .last = 0x44, 130 .last = 0x44,
131 }, 131 },
132 /* 0x80-0x8B is SIM registers and should 132 /* 0x80-0x8B is SIM registers and should
133 * not be accessed from here */ 133 * not be accessed from here */
134 }, 134 },
135 }, 135 },
136 [AB8500_USB] = { 136 [AB8500_USB] = {
137 .num_ranges = 2, 137 .num_ranges = 2,
138 .range = (struct ab8500_reg_range[]) { 138 .range = (struct ab8500_reg_range[]) {
139 { 139 {
140 .first = 0x80, 140 .first = 0x80,
141 .last = 0x83, 141 .last = 0x83,
142 }, 142 },
143 { 143 {
144 .first = 0x87, 144 .first = 0x87,
145 .last = 0x8A, 145 .last = 0x8A,
146 }, 146 },
147 }, 147 },
148 }, 148 },
149 [AB8500_TVOUT] = { 149 [AB8500_TVOUT] = {
150 .num_ranges = 9, 150 .num_ranges = 9,
151 .range = (struct ab8500_reg_range[]) { 151 .range = (struct ab8500_reg_range[]) {
152 { 152 {
153 .first = 0x00, 153 .first = 0x00,
154 .last = 0x12, 154 .last = 0x12,
155 }, 155 },
156 { 156 {
157 .first = 0x15, 157 .first = 0x15,
158 .last = 0x17, 158 .last = 0x17,
159 }, 159 },
160 { 160 {
161 .first = 0x19, 161 .first = 0x19,
162 .last = 0x21, 162 .last = 0x21,
163 }, 163 },
164 { 164 {
165 .first = 0x27, 165 .first = 0x27,
166 .last = 0x2C, 166 .last = 0x2C,
167 }, 167 },
168 { 168 {
169 .first = 0x41, 169 .first = 0x41,
170 .last = 0x41, 170 .last = 0x41,
171 }, 171 },
172 { 172 {
173 .first = 0x45, 173 .first = 0x45,
174 .last = 0x5B, 174 .last = 0x5B,
175 }, 175 },
176 { 176 {
177 .first = 0x5D, 177 .first = 0x5D,
178 .last = 0x5D, 178 .last = 0x5D,
179 }, 179 },
180 { 180 {
181 .first = 0x69, 181 .first = 0x69,
182 .last = 0x69, 182 .last = 0x69,
183 }, 183 },
184 { 184 {
185 .first = 0x80, 185 .first = 0x80,
186 .last = 0x81, 186 .last = 0x81,
187 }, 187 },
188 }, 188 },
189 }, 189 },
190 [AB8500_DBI] = { 190 [AB8500_DBI] = {
191 .num_ranges = 0, 191 .num_ranges = 0,
192 .range = 0, 192 .range = NULL,
193 }, 193 },
194 [AB8500_ECI_AV_ACC] = { 194 [AB8500_ECI_AV_ACC] = {
195 .num_ranges = 1, 195 .num_ranges = 1,
196 .range = (struct ab8500_reg_range[]) { 196 .range = (struct ab8500_reg_range[]) {
197 { 197 {
198 .first = 0x80, 198 .first = 0x80,
199 .last = 0x82, 199 .last = 0x82,
200 }, 200 },
201 }, 201 },
202 }, 202 },
203 [0x9] = { 203 [0x9] = {
204 .num_ranges = 0, 204 .num_ranges = 0,
205 .range = 0, 205 .range = NULL,
206 }, 206 },
207 [AB8500_GPADC] = { 207 [AB8500_GPADC] = {
208 .num_ranges = 1, 208 .num_ranges = 1,
209 .range = (struct ab8500_reg_range[]) { 209 .range = (struct ab8500_reg_range[]) {
210 { 210 {
211 .first = 0x00, 211 .first = 0x00,
212 .last = 0x08, 212 .last = 0x08,
213 }, 213 },
214 }, 214 },
215 }, 215 },
216 [AB8500_CHARGER] = { 216 [AB8500_CHARGER] = {
217 .num_ranges = 8, 217 .num_ranges = 8,
218 .range = (struct ab8500_reg_range[]) { 218 .range = (struct ab8500_reg_range[]) {
219 { 219 {
220 .first = 0x00, 220 .first = 0x00,
221 .last = 0x03, 221 .last = 0x03,
222 }, 222 },
223 { 223 {
224 .first = 0x05, 224 .first = 0x05,
225 .last = 0x05, 225 .last = 0x05,
226 }, 226 },
227 { 227 {
228 .first = 0x40, 228 .first = 0x40,
229 .last = 0x40, 229 .last = 0x40,
230 }, 230 },
231 { 231 {
232 .first = 0x42, 232 .first = 0x42,
233 .last = 0x42, 233 .last = 0x42,
234 }, 234 },
235 { 235 {
236 .first = 0x44, 236 .first = 0x44,
237 .last = 0x44, 237 .last = 0x44,
238 }, 238 },
239 { 239 {
240 .first = 0x50, 240 .first = 0x50,
241 .last = 0x55, 241 .last = 0x55,
242 }, 242 },
243 { 243 {
244 .first = 0x80, 244 .first = 0x80,
245 .last = 0x82, 245 .last = 0x82,
246 }, 246 },
247 { 247 {
248 .first = 0xC0, 248 .first = 0xC0,
249 .last = 0xC2, 249 .last = 0xC2,
250 }, 250 },
251 }, 251 },
252 }, 252 },
253 [AB8500_GAS_GAUGE] = { 253 [AB8500_GAS_GAUGE] = {
254 .num_ranges = 3, 254 .num_ranges = 3,
255 .range = (struct ab8500_reg_range[]) { 255 .range = (struct ab8500_reg_range[]) {
256 { 256 {
257 .first = 0x00, 257 .first = 0x00,
258 .last = 0x00, 258 .last = 0x00,
259 }, 259 },
260 { 260 {
261 .first = 0x07, 261 .first = 0x07,
262 .last = 0x0A, 262 .last = 0x0A,
263 }, 263 },
264 { 264 {
265 .first = 0x10, 265 .first = 0x10,
266 .last = 0x14, 266 .last = 0x14,
267 }, 267 },
268 }, 268 },
269 }, 269 },
270 [AB8500_AUDIO] = { 270 [AB8500_AUDIO] = {
271 .num_ranges = 1, 271 .num_ranges = 1,
272 .range = (struct ab8500_reg_range[]) { 272 .range = (struct ab8500_reg_range[]) {
273 { 273 {
274 .first = 0x00, 274 .first = 0x00,
275 .last = 0x6F, 275 .last = 0x6F,
276 }, 276 },
277 }, 277 },
278 }, 278 },
279 [AB8500_INTERRUPT] = { 279 [AB8500_INTERRUPT] = {
280 .num_ranges = 0, 280 .num_ranges = 0,
281 .range = 0, 281 .range = NULL,
282 }, 282 },
283 [AB8500_RTC] = { 283 [AB8500_RTC] = {
284 .num_ranges = 1, 284 .num_ranges = 1,
285 .range = (struct ab8500_reg_range[]) { 285 .range = (struct ab8500_reg_range[]) {
286 { 286 {
287 .first = 0x00, 287 .first = 0x00,
288 .last = 0x0F, 288 .last = 0x0F,
289 }, 289 },
290 }, 290 },
291 }, 291 },
292 [AB8500_MISC] = { 292 [AB8500_MISC] = {
293 .num_ranges = 8, 293 .num_ranges = 8,
294 .range = (struct ab8500_reg_range[]) { 294 .range = (struct ab8500_reg_range[]) {
295 { 295 {
296 .first = 0x00, 296 .first = 0x00,
297 .last = 0x05, 297 .last = 0x05,
298 }, 298 },
299 { 299 {
300 .first = 0x10, 300 .first = 0x10,
301 .last = 0x15, 301 .last = 0x15,
302 }, 302 },
303 { 303 {
304 .first = 0x20, 304 .first = 0x20,
305 .last = 0x25, 305 .last = 0x25,
306 }, 306 },
307 { 307 {
308 .first = 0x30, 308 .first = 0x30,
309 .last = 0x35, 309 .last = 0x35,
310 }, 310 },
311 { 311 {
312 .first = 0x40, 312 .first = 0x40,
313 .last = 0x45, 313 .last = 0x45,
314 }, 314 },
315 { 315 {
316 .first = 0x50, 316 .first = 0x50,
317 .last = 0x50, 317 .last = 0x50,
318 }, 318 },
319 { 319 {
320 .first = 0x60, 320 .first = 0x60,
321 .last = 0x67, 321 .last = 0x67,
322 }, 322 },
323 { 323 {
324 .first = 0x80, 324 .first = 0x80,
325 .last = 0x80, 325 .last = 0x80,
326 }, 326 },
327 }, 327 },
328 }, 328 },
329 [0x11] = { 329 [0x11] = {
330 .num_ranges = 0, 330 .num_ranges = 0,
331 .range = 0, 331 .range = NULL,
332 }, 332 },
333 [0x12] = { 333 [0x12] = {
334 .num_ranges = 0, 334 .num_ranges = 0,
335 .range = 0, 335 .range = NULL,
336 }, 336 },
337 [0x13] = { 337 [0x13] = {
338 .num_ranges = 0, 338 .num_ranges = 0,
339 .range = 0, 339 .range = NULL,
340 }, 340 },
341 [0x14] = { 341 [0x14] = {
342 .num_ranges = 0, 342 .num_ranges = 0,
343 .range = 0, 343 .range = NULL,
344 }, 344 },
345 [AB8500_OTP_EMUL] = { 345 [AB8500_OTP_EMUL] = {
346 .num_ranges = 1, 346 .num_ranges = 1,
347 .range = (struct ab8500_reg_range[]) { 347 .range = (struct ab8500_reg_range[]) {
348 { 348 {
349 .first = 0x01, 349 .first = 0x01,
350 .last = 0x0F, 350 .last = 0x0F,
351 }, 351 },
352 }, 352 },
353 }, 353 },
354}; 354};
355 355
356static int ab8500_registers_print(struct seq_file *s, void *p) 356static int ab8500_registers_print(struct seq_file *s, void *p)
357{ 357{
358 struct device *dev = s->private; 358 struct device *dev = s->private;
359 unsigned int i; 359 unsigned int i;
360 u32 bank = debug_bank; 360 u32 bank = debug_bank;
361 361
362 seq_printf(s, AB8500_NAME_STRING " register values:\n"); 362 seq_printf(s, AB8500_NAME_STRING " register values:\n");
363 363
364 seq_printf(s, " bank %u:\n", bank); 364 seq_printf(s, " bank %u:\n", bank);
365 for (i = 0; i < debug_ranges[bank].num_ranges; i++) { 365 for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
366 u32 reg; 366 u32 reg;
367 367
368 for (reg = debug_ranges[bank].range[i].first; 368 for (reg = debug_ranges[bank].range[i].first;
369 reg <= debug_ranges[bank].range[i].last; 369 reg <= debug_ranges[bank].range[i].last;
370 reg++) { 370 reg++) {
371 u8 value; 371 u8 value;
372 int err; 372 int err;
373 373
374 err = abx500_get_register_interruptible(dev, 374 err = abx500_get_register_interruptible(dev,
375 (u8)bank, (u8)reg, &value); 375 (u8)bank, (u8)reg, &value);
376 if (err < 0) { 376 if (err < 0) {
377 dev_err(dev, "ab->read fail %d\n", err); 377 dev_err(dev, "ab->read fail %d\n", err);
378 return err; 378 return err;
379 } 379 }
380 380
381 err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank, 381 err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank,
382 reg, value); 382 reg, value);
383 if (err < 0) { 383 if (err < 0) {
384 dev_err(dev, "seq_printf overflow\n"); 384 dev_err(dev, "seq_printf overflow\n");
385 /* Error is not returned here since 385 /* Error is not returned here since
386 * the output is wanted in any case */ 386 * the output is wanted in any case */
387 return 0; 387 return 0;
388 } 388 }
389 } 389 }
390 } 390 }
391 return 0; 391 return 0;
392} 392}
393 393
394static int ab8500_registers_open(struct inode *inode, struct file *file) 394static int ab8500_registers_open(struct inode *inode, struct file *file)
395{ 395{
396 return single_open(file, ab8500_registers_print, inode->i_private); 396 return single_open(file, ab8500_registers_print, inode->i_private);
397} 397}
398 398
399static const struct file_operations ab8500_registers_fops = { 399static const struct file_operations ab8500_registers_fops = {
400 .open = ab8500_registers_open, 400 .open = ab8500_registers_open,
401 .read = seq_read, 401 .read = seq_read,
402 .llseek = seq_lseek, 402 .llseek = seq_lseek,
403 .release = single_release, 403 .release = single_release,
404 .owner = THIS_MODULE, 404 .owner = THIS_MODULE,
405}; 405};
406 406
407static int ab8500_bank_print(struct seq_file *s, void *p) 407static int ab8500_bank_print(struct seq_file *s, void *p)
408{ 408{
409 return seq_printf(s, "%d\n", debug_bank); 409 return seq_printf(s, "%d\n", debug_bank);
410} 410}
411 411
412static int ab8500_bank_open(struct inode *inode, struct file *file) 412static int ab8500_bank_open(struct inode *inode, struct file *file)
413{ 413{
414 return single_open(file, ab8500_bank_print, inode->i_private); 414 return single_open(file, ab8500_bank_print, inode->i_private);
415} 415}
416 416
417static ssize_t ab8500_bank_write(struct file *file, 417static ssize_t ab8500_bank_write(struct file *file,
418 const char __user *user_buf, 418 const char __user *user_buf,
419 size_t count, loff_t *ppos) 419 size_t count, loff_t *ppos)
420{ 420{
421 struct device *dev = ((struct seq_file *)(file->private_data))->private; 421 struct device *dev = ((struct seq_file *)(file->private_data))->private;
422 char buf[32]; 422 char buf[32];
423 int buf_size; 423 int buf_size;
424 unsigned long user_bank; 424 unsigned long user_bank;
425 int err; 425 int err;
426 426
427 /* Get userspace string and assure termination */ 427 /* Get userspace string and assure termination */
428 buf_size = min(count, (sizeof(buf) - 1)); 428 buf_size = min(count, (sizeof(buf) - 1));
429 if (copy_from_user(buf, user_buf, buf_size)) 429 if (copy_from_user(buf, user_buf, buf_size))
430 return -EFAULT; 430 return -EFAULT;
431 buf[buf_size] = 0; 431 buf[buf_size] = 0;
432 432
433 err = strict_strtoul(buf, 0, &user_bank); 433 err = strict_strtoul(buf, 0, &user_bank);
434 if (err) 434 if (err)
435 return -EINVAL; 435 return -EINVAL;
436 436
437 if (user_bank >= AB8500_NUM_BANKS) { 437 if (user_bank >= AB8500_NUM_BANKS) {
438 dev_err(dev, "debugfs error input > number of banks\n"); 438 dev_err(dev, "debugfs error input > number of banks\n");
439 return -EINVAL; 439 return -EINVAL;
440 } 440 }
441 441
442 debug_bank = user_bank; 442 debug_bank = user_bank;
443 443
444 return buf_size; 444 return buf_size;
445} 445}
446 446
447static int ab8500_address_print(struct seq_file *s, void *p) 447static int ab8500_address_print(struct seq_file *s, void *p)
448{ 448{
449 return seq_printf(s, "0x%02X\n", debug_address); 449 return seq_printf(s, "0x%02X\n", debug_address);
450} 450}
451 451
452static int ab8500_address_open(struct inode *inode, struct file *file) 452static int ab8500_address_open(struct inode *inode, struct file *file)
453{ 453{
454 return single_open(file, ab8500_address_print, inode->i_private); 454 return single_open(file, ab8500_address_print, inode->i_private);
455} 455}
456 456
457static ssize_t ab8500_address_write(struct file *file, 457static ssize_t ab8500_address_write(struct file *file,
458 const char __user *user_buf, 458 const char __user *user_buf,
459 size_t count, loff_t *ppos) 459 size_t count, loff_t *ppos)
460{ 460{
461 struct device *dev = ((struct seq_file *)(file->private_data))->private; 461 struct device *dev = ((struct seq_file *)(file->private_data))->private;
462 char buf[32]; 462 char buf[32];
463 int buf_size; 463 int buf_size;
464 unsigned long user_address; 464 unsigned long user_address;
465 int err; 465 int err;
466 466
467 /* Get userspace string and assure termination */ 467 /* Get userspace string and assure termination */
468 buf_size = min(count, (sizeof(buf) - 1)); 468 buf_size = min(count, (sizeof(buf) - 1));
469 if (copy_from_user(buf, user_buf, buf_size)) 469 if (copy_from_user(buf, user_buf, buf_size))
470 return -EFAULT; 470 return -EFAULT;
471 buf[buf_size] = 0; 471 buf[buf_size] = 0;
472 472
473 err = strict_strtoul(buf, 0, &user_address); 473 err = strict_strtoul(buf, 0, &user_address);
474 if (err) 474 if (err)
475 return -EINVAL; 475 return -EINVAL;
476 if (user_address > 0xff) { 476 if (user_address > 0xff) {
477 dev_err(dev, "debugfs error input > 0xff\n"); 477 dev_err(dev, "debugfs error input > 0xff\n");
478 return -EINVAL; 478 return -EINVAL;
479 } 479 }
480 debug_address = user_address; 480 debug_address = user_address;
481 return buf_size; 481 return buf_size;
482} 482}
483 483
484static int ab8500_val_print(struct seq_file *s, void *p) 484static int ab8500_val_print(struct seq_file *s, void *p)
485{ 485{
486 struct device *dev = s->private; 486 struct device *dev = s->private;
487 int ret; 487 int ret;
488 u8 regvalue; 488 u8 regvalue;
489 489
490 ret = abx500_get_register_interruptible(dev, 490 ret = abx500_get_register_interruptible(dev,
491 (u8)debug_bank, (u8)debug_address, &regvalue); 491 (u8)debug_bank, (u8)debug_address, &regvalue);
492 if (ret < 0) { 492 if (ret < 0) {
493 dev_err(dev, "abx500_get_reg fail %d, %d\n", 493 dev_err(dev, "abx500_get_reg fail %d, %d\n",
494 ret, __LINE__); 494 ret, __LINE__);
495 return -EINVAL; 495 return -EINVAL;
496 } 496 }
497 seq_printf(s, "0x%02X\n", regvalue); 497 seq_printf(s, "0x%02X\n", regvalue);
498 498
499 return 0; 499 return 0;
500} 500}
501 501
502static int ab8500_val_open(struct inode *inode, struct file *file) 502static int ab8500_val_open(struct inode *inode, struct file *file)
503{ 503{
504 return single_open(file, ab8500_val_print, inode->i_private); 504 return single_open(file, ab8500_val_print, inode->i_private);
505} 505}
506 506
507static ssize_t ab8500_val_write(struct file *file, 507static ssize_t ab8500_val_write(struct file *file,
508 const char __user *user_buf, 508 const char __user *user_buf,
509 size_t count, loff_t *ppos) 509 size_t count, loff_t *ppos)
510{ 510{
511 struct device *dev = ((struct seq_file *)(file->private_data))->private; 511 struct device *dev = ((struct seq_file *)(file->private_data))->private;
512 char buf[32]; 512 char buf[32];
513 int buf_size; 513 int buf_size;
514 unsigned long user_val; 514 unsigned long user_val;
515 int err; 515 int err;
516 516
517 /* Get userspace string and assure termination */ 517 /* Get userspace string and assure termination */
518 buf_size = min(count, (sizeof(buf)-1)); 518 buf_size = min(count, (sizeof(buf)-1));
519 if (copy_from_user(buf, user_buf, buf_size)) 519 if (copy_from_user(buf, user_buf, buf_size))
520 return -EFAULT; 520 return -EFAULT;
521 buf[buf_size] = 0; 521 buf[buf_size] = 0;
522 522
523 err = strict_strtoul(buf, 0, &user_val); 523 err = strict_strtoul(buf, 0, &user_val);
524 if (err) 524 if (err)
525 return -EINVAL; 525 return -EINVAL;
526 if (user_val > 0xff) { 526 if (user_val > 0xff) {
527 dev_err(dev, "debugfs error input > 0xff\n"); 527 dev_err(dev, "debugfs error input > 0xff\n");
528 return -EINVAL; 528 return -EINVAL;
529 } 529 }
530 err = abx500_set_register_interruptible(dev, 530 err = abx500_set_register_interruptible(dev,
531 (u8)debug_bank, debug_address, (u8)user_val); 531 (u8)debug_bank, debug_address, (u8)user_val);
532 if (err < 0) { 532 if (err < 0) {
533 printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__); 533 printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
534 return -EINVAL; 534 return -EINVAL;
535 } 535 }
536 536
537 return buf_size; 537 return buf_size;
538} 538}
539 539
540static const struct file_operations ab8500_bank_fops = { 540static const struct file_operations ab8500_bank_fops = {
541 .open = ab8500_bank_open, 541 .open = ab8500_bank_open,
542 .write = ab8500_bank_write, 542 .write = ab8500_bank_write,
543 .read = seq_read, 543 .read = seq_read,
544 .llseek = seq_lseek, 544 .llseek = seq_lseek,
545 .release = single_release, 545 .release = single_release,
546 .owner = THIS_MODULE, 546 .owner = THIS_MODULE,
547}; 547};
548 548
549static const struct file_operations ab8500_address_fops = { 549static const struct file_operations ab8500_address_fops = {
550 .open = ab8500_address_open, 550 .open = ab8500_address_open,
551 .write = ab8500_address_write, 551 .write = ab8500_address_write,
552 .read = seq_read, 552 .read = seq_read,
553 .llseek = seq_lseek, 553 .llseek = seq_lseek,
554 .release = single_release, 554 .release = single_release,
555 .owner = THIS_MODULE, 555 .owner = THIS_MODULE,
556}; 556};
557 557
558static const struct file_operations ab8500_val_fops = { 558static const struct file_operations ab8500_val_fops = {
559 .open = ab8500_val_open, 559 .open = ab8500_val_open,
560 .write = ab8500_val_write, 560 .write = ab8500_val_write,
561 .read = seq_read, 561 .read = seq_read,
562 .llseek = seq_lseek, 562 .llseek = seq_lseek,
563 .release = single_release, 563 .release = single_release,
564 .owner = THIS_MODULE, 564 .owner = THIS_MODULE,
565}; 565};
566 566
567static struct dentry *ab8500_dir; 567static struct dentry *ab8500_dir;
@@ -572,77 +572,77 @@ static struct dentry *ab8500_val_file;
572 572
573static int __devinit ab8500_debug_probe(struct platform_device *plf) 573static int __devinit ab8500_debug_probe(struct platform_device *plf)
574{ 574{
575 debug_bank = AB8500_MISC; 575 debug_bank = AB8500_MISC;
576 debug_address = AB8500_REV_REG & 0x00FF; 576 debug_address = AB8500_REV_REG & 0x00FF;
577 577
578 ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL); 578 ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
579 if (!ab8500_dir) 579 if (!ab8500_dir)
580 goto exit_no_debugfs; 580 goto exit_no_debugfs;
581 581
582 ab8500_reg_file = debugfs_create_file("all-bank-registers", 582 ab8500_reg_file = debugfs_create_file("all-bank-registers",
583 S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops); 583 S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
584 if (!ab8500_reg_file) 584 if (!ab8500_reg_file)
585 goto exit_destroy_dir; 585 goto exit_destroy_dir;
586 586
587 ab8500_bank_file = debugfs_create_file("register-bank", 587 ab8500_bank_file = debugfs_create_file("register-bank",
588 (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops); 588 (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
589 if (!ab8500_bank_file) 589 if (!ab8500_bank_file)
590 goto exit_destroy_reg; 590 goto exit_destroy_reg;
591 591
592 ab8500_address_file = debugfs_create_file("register-address", 592 ab8500_address_file = debugfs_create_file("register-address",
593 (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, 593 (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
594 &ab8500_address_fops); 594 &ab8500_address_fops);
595 if (!ab8500_address_file) 595 if (!ab8500_address_file)
596 goto exit_destroy_bank; 596 goto exit_destroy_bank;
597 597
598 ab8500_val_file = debugfs_create_file("register-value", 598 ab8500_val_file = debugfs_create_file("register-value",
599 (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops); 599 (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
600 if (!ab8500_val_file) 600 if (!ab8500_val_file)
601 goto exit_destroy_address; 601 goto exit_destroy_address;
602 602
603 return 0; 603 return 0;
604 604
605exit_destroy_address: 605exit_destroy_address:
606 debugfs_remove(ab8500_address_file); 606 debugfs_remove(ab8500_address_file);
607exit_destroy_bank: 607exit_destroy_bank:
608 debugfs_remove(ab8500_bank_file); 608 debugfs_remove(ab8500_bank_file);
609exit_destroy_reg: 609exit_destroy_reg:
610 debugfs_remove(ab8500_reg_file); 610 debugfs_remove(ab8500_reg_file);
611exit_destroy_dir: 611exit_destroy_dir:
612 debugfs_remove(ab8500_dir); 612 debugfs_remove(ab8500_dir);
613exit_no_debugfs: 613exit_no_debugfs:
614 dev_err(&plf->dev, "failed to create debugfs entries.\n"); 614 dev_err(&plf->dev, "failed to create debugfs entries.\n");
615 return -ENOMEM; 615 return -ENOMEM;
616} 616}
617 617
618static int __devexit ab8500_debug_remove(struct platform_device *plf) 618static int __devexit ab8500_debug_remove(struct platform_device *plf)
619{ 619{
620 debugfs_remove(ab8500_val_file); 620 debugfs_remove(ab8500_val_file);
621 debugfs_remove(ab8500_address_file); 621 debugfs_remove(ab8500_address_file);
622 debugfs_remove(ab8500_bank_file); 622 debugfs_remove(ab8500_bank_file);
623 debugfs_remove(ab8500_reg_file); 623 debugfs_remove(ab8500_reg_file);
624 debugfs_remove(ab8500_dir); 624 debugfs_remove(ab8500_dir);
625 625
626 return 0; 626 return 0;
627} 627}
628 628
629static struct platform_driver ab8500_debug_driver = { 629static struct platform_driver ab8500_debug_driver = {
630 .driver = { 630 .driver = {
631 .name = "ab8500-debug", 631 .name = "ab8500-debug",
632 .owner = THIS_MODULE, 632 .owner = THIS_MODULE,
633 }, 633 },
634 .probe = ab8500_debug_probe, 634 .probe = ab8500_debug_probe,
635 .remove = __devexit_p(ab8500_debug_remove) 635 .remove = __devexit_p(ab8500_debug_remove)
636}; 636};
637 637
638static int __init ab8500_debug_init(void) 638static int __init ab8500_debug_init(void)
639{ 639{
640 return platform_driver_register(&ab8500_debug_driver); 640 return platform_driver_register(&ab8500_debug_driver);
641} 641}
642 642
643static void __exit ab8500_debug_exit(void) 643static void __exit ab8500_debug_exit(void)
644{ 644{
645 platform_driver_unregister(&ab8500_debug_driver); 645 platform_driver_unregister(&ab8500_debug_driver);
646} 646}
647subsys_initcall(ab8500_debug_init); 647subsys_initcall(ab8500_debug_init);
648module_exit(ab8500_debug_exit); 648module_exit(ab8500_debug_exit);
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
deleted file mode 100644
index b1653421edb5..000000000000
--- a/drivers/mfd/ab8500-spi.c
+++ /dev/null
@@ -1,143 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/platform_device.h>
13#include <linux/spi/spi.h>
14#include <linux/mfd/ab8500.h>
15
16/*
17 * This funtion writes to any AB8500 registers using
18 * SPI protocol & before it writes it packs the data
19 * in the below 24 bit frame format
20 *
21 * *|------------------------------------|
22 * *| 23|22...18|17.......10|9|8|7......0|
23 * *| r/w bank adr data |
24 * * ------------------------------------
25 *
26 * This function shouldn't be called from interrupt
27 * context
28 */
29static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data)
30{
31 struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
32 dev);
33 unsigned long spi_data = addr << 10 | data;
34 struct spi_transfer xfer;
35 struct spi_message msg;
36
37 ab8500->tx_buf[0] = spi_data;
38 ab8500->rx_buf[0] = 0;
39
40 xfer.tx_buf = ab8500->tx_buf;
41 xfer.rx_buf = NULL;
42 xfer.len = sizeof(unsigned long);
43
44 spi_message_init(&msg);
45 spi_message_add_tail(&xfer, &msg);
46
47 return spi_sync(spi, &msg);
48}
49
50static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr)
51{
52 struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
53 dev);
54 unsigned long spi_data = 1 << 23 | addr << 10;
55 struct spi_transfer xfer;
56 struct spi_message msg;
57 int ret;
58
59 ab8500->tx_buf[0] = spi_data;
60 ab8500->rx_buf[0] = 0;
61
62 xfer.tx_buf = ab8500->tx_buf;
63 xfer.rx_buf = ab8500->rx_buf;
64 xfer.len = sizeof(unsigned long);
65
66 spi_message_init(&msg);
67 spi_message_add_tail(&xfer, &msg);
68
69 ret = spi_sync(spi, &msg);
70 if (!ret)
71 /*
72 * Only the 8 lowermost bytes are
73 * defined with value, the rest may
74 * vary depending on chip/board noise.
75 */
76 ret = ab8500->rx_buf[0] & 0xFFU;
77
78 return ret;
79}
80
81static int __devinit ab8500_spi_probe(struct spi_device *spi)
82{
83 struct ab8500 *ab8500;
84 int ret;
85
86 spi->bits_per_word = 24;
87 ret = spi_setup(spi);
88 if (ret < 0)
89 return ret;
90
91 ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
92 if (!ab8500)
93 return -ENOMEM;
94
95 ab8500->dev = &spi->dev;
96 ab8500->irq = spi->irq;
97
98 ab8500->read = ab8500_spi_read;
99 ab8500->write = ab8500_spi_write;
100
101 spi_set_drvdata(spi, ab8500);
102
103 ret = ab8500_init(ab8500);
104 if (ret)
105 kfree(ab8500);
106
107 return ret;
108}
109
110static int __devexit ab8500_spi_remove(struct spi_device *spi)
111{
112 struct ab8500 *ab8500 = spi_get_drvdata(spi);
113
114 ab8500_exit(ab8500);
115 kfree(ab8500);
116
117 return 0;
118}
119
120static struct spi_driver ab8500_spi_driver = {
121 .driver = {
122 .name = "ab8500-spi",
123 .owner = THIS_MODULE,
124 },
125 .probe = ab8500_spi_probe,
126 .remove = __devexit_p(ab8500_spi_remove)
127};
128
129static int __init ab8500_spi_init(void)
130{
131 return spi_register_driver(&ab8500_spi_driver);
132}
133subsys_initcall(ab8500_spi_init);
134
135static void __exit ab8500_spi_exit(void)
136{
137 spi_unregister_driver(&ab8500_spi_driver);
138}
139module_exit(ab8500_spi_exit);
140
141MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
142MODULE_DESCRIPTION("AB8500 SPI");
143MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 7de708d15d72..6a1f94042612 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -57,7 +57,7 @@ struct asic3_clk {
57 .rate = _rate, \ 57 .rate = _rate, \
58 } 58 }
59 59
60struct asic3_clk asic3_clk_init[] __initdata = { 60static struct asic3_clk asic3_clk_init[] __initdata = {
61 INIT_CDEX(SPI, 0), 61 INIT_CDEX(SPI, 0),
62 INIT_CDEX(OWM, 5000000), 62 INIT_CDEX(OWM, 5000000),
63 INIT_CDEX(PWM0, 0), 63 INIT_CDEX(PWM0, 0),
@@ -102,7 +102,7 @@ static inline u32 asic3_read_register(struct asic3 *asic,
102 (reg >> asic->bus_shift)); 102 (reg >> asic->bus_shift));
103} 103}
104 104
105void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set) 105static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
106{ 106{
107 unsigned long flags; 107 unsigned long flags;
108 u32 val; 108 u32 val;
@@ -226,14 +226,14 @@ static inline int asic3_irq_to_index(struct asic3 *asic, int irq)
226 return (irq - asic->irq_base) & 0xf; 226 return (irq - asic->irq_base) & 0xf;
227} 227}
228 228
229static void asic3_mask_gpio_irq(unsigned int irq) 229static void asic3_mask_gpio_irq(struct irq_data *data)
230{ 230{
231 struct asic3 *asic = get_irq_chip_data(irq); 231 struct asic3 *asic = irq_data_get_irq_chip_data(data);
232 u32 val, bank, index; 232 u32 val, bank, index;
233 unsigned long flags; 233 unsigned long flags;
234 234
235 bank = asic3_irq_to_bank(asic, irq); 235 bank = asic3_irq_to_bank(asic, data->irq);
236 index = asic3_irq_to_index(asic, irq); 236 index = asic3_irq_to_index(asic, data->irq);
237 237
238 spin_lock_irqsave(&asic->lock, flags); 238 spin_lock_irqsave(&asic->lock, flags);
239 val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK); 239 val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
@@ -242,9 +242,9 @@ static void asic3_mask_gpio_irq(unsigned int irq)
242 spin_unlock_irqrestore(&asic->lock, flags); 242 spin_unlock_irqrestore(&asic->lock, flags);
243} 243}
244 244
245static void asic3_mask_irq(unsigned int irq) 245static void asic3_mask_irq(struct irq_data *data)
246{ 246{
247 struct asic3 *asic = get_irq_chip_data(irq); 247 struct asic3 *asic = irq_data_get_irq_chip_data(data);
248 int regval; 248 int regval;
249 unsigned long flags; 249 unsigned long flags;
250 250
@@ -254,7 +254,7 @@ static void asic3_mask_irq(unsigned int irq)
254 ASIC3_INTR_INT_MASK); 254 ASIC3_INTR_INT_MASK);
255 255
256 regval &= ~(ASIC3_INTMASK_MASK0 << 256 regval &= ~(ASIC3_INTMASK_MASK0 <<
257 (irq - (asic->irq_base + ASIC3_NUM_GPIOS))); 257 (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
258 258
259 asic3_write_register(asic, 259 asic3_write_register(asic,
260 ASIC3_INTR_BASE + 260 ASIC3_INTR_BASE +
@@ -263,14 +263,14 @@ static void asic3_mask_irq(unsigned int irq)
263 spin_unlock_irqrestore(&asic->lock, flags); 263 spin_unlock_irqrestore(&asic->lock, flags);
264} 264}
265 265
266static void asic3_unmask_gpio_irq(unsigned int irq) 266static void asic3_unmask_gpio_irq(struct irq_data *data)
267{ 267{
268 struct asic3 *asic = get_irq_chip_data(irq); 268 struct asic3 *asic = irq_data_get_irq_chip_data(data);
269 u32 val, bank, index; 269 u32 val, bank, index;
270 unsigned long flags; 270 unsigned long flags;
271 271
272 bank = asic3_irq_to_bank(asic, irq); 272 bank = asic3_irq_to_bank(asic, data->irq);
273 index = asic3_irq_to_index(asic, irq); 273 index = asic3_irq_to_index(asic, data->irq);
274 274
275 spin_lock_irqsave(&asic->lock, flags); 275 spin_lock_irqsave(&asic->lock, flags);
276 val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK); 276 val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
@@ -279,9 +279,9 @@ static void asic3_unmask_gpio_irq(unsigned int irq)
279 spin_unlock_irqrestore(&asic->lock, flags); 279 spin_unlock_irqrestore(&asic->lock, flags);
280} 280}
281 281
282static void asic3_unmask_irq(unsigned int irq) 282static void asic3_unmask_irq(struct irq_data *data)
283{ 283{
284 struct asic3 *asic = get_irq_chip_data(irq); 284 struct asic3 *asic = irq_data_get_irq_chip_data(data);
285 int regval; 285 int regval;
286 unsigned long flags; 286 unsigned long flags;
287 287
@@ -291,7 +291,7 @@ static void asic3_unmask_irq(unsigned int irq)
291 ASIC3_INTR_INT_MASK); 291 ASIC3_INTR_INT_MASK);
292 292
293 regval |= (ASIC3_INTMASK_MASK0 << 293 regval |= (ASIC3_INTMASK_MASK0 <<
294 (irq - (asic->irq_base + ASIC3_NUM_GPIOS))); 294 (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
295 295
296 asic3_write_register(asic, 296 asic3_write_register(asic,
297 ASIC3_INTR_BASE + 297 ASIC3_INTR_BASE +
@@ -300,15 +300,15 @@ static void asic3_unmask_irq(unsigned int irq)
300 spin_unlock_irqrestore(&asic->lock, flags); 300 spin_unlock_irqrestore(&asic->lock, flags);
301} 301}
302 302
303static int asic3_gpio_irq_type(unsigned int irq, unsigned int type) 303static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
304{ 304{
305 struct asic3 *asic = get_irq_chip_data(irq); 305 struct asic3 *asic = irq_data_get_irq_chip_data(data);
306 u32 bank, index; 306 u32 bank, index;
307 u16 trigger, level, edge, bit; 307 u16 trigger, level, edge, bit;
308 unsigned long flags; 308 unsigned long flags;
309 309
310 bank = asic3_irq_to_bank(asic, irq); 310 bank = asic3_irq_to_bank(asic, data->irq);
311 index = asic3_irq_to_index(asic, irq); 311 index = asic3_irq_to_index(asic, data->irq);
312 bit = 1<<index; 312 bit = 1<<index;
313 313
314 spin_lock_irqsave(&asic->lock, flags); 314 spin_lock_irqsave(&asic->lock, flags);
@@ -318,7 +318,7 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
318 bank + ASIC3_GPIO_EDGE_TRIGGER); 318 bank + ASIC3_GPIO_EDGE_TRIGGER);
319 trigger = asic3_read_register(asic, 319 trigger = asic3_read_register(asic,
320 bank + ASIC3_GPIO_TRIGGER_TYPE); 320 bank + ASIC3_GPIO_TRIGGER_TYPE);
321 asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit; 321 asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] &= ~bit;
322 322
323 if (type == IRQ_TYPE_EDGE_RISING) { 323 if (type == IRQ_TYPE_EDGE_RISING) {
324 trigger |= bit; 324 trigger |= bit;
@@ -328,11 +328,11 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
328 edge &= ~bit; 328 edge &= ~bit;
329 } else if (type == IRQ_TYPE_EDGE_BOTH) { 329 } else if (type == IRQ_TYPE_EDGE_BOTH) {
330 trigger |= bit; 330 trigger |= bit;
331 if (asic3_gpio_get(&asic->gpio, irq - asic->irq_base)) 331 if (asic3_gpio_get(&asic->gpio, data->irq - asic->irq_base))
332 edge &= ~bit; 332 edge &= ~bit;
333 else 333 else
334 edge |= bit; 334 edge |= bit;
335 asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit; 335 asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] |= bit;
336 } else if (type == IRQ_TYPE_LEVEL_LOW) { 336 } else if (type == IRQ_TYPE_LEVEL_LOW) {
337 trigger &= ~bit; 337 trigger &= ~bit;
338 level &= ~bit; 338 level &= ~bit;
@@ -359,17 +359,17 @@ static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
359 359
360static struct irq_chip asic3_gpio_irq_chip = { 360static struct irq_chip asic3_gpio_irq_chip = {
361 .name = "ASIC3-GPIO", 361 .name = "ASIC3-GPIO",
362 .ack = asic3_mask_gpio_irq, 362 .irq_ack = asic3_mask_gpio_irq,
363 .mask = asic3_mask_gpio_irq, 363 .irq_mask = asic3_mask_gpio_irq,
364 .unmask = asic3_unmask_gpio_irq, 364 .irq_unmask = asic3_unmask_gpio_irq,
365 .set_type = asic3_gpio_irq_type, 365 .irq_set_type = asic3_gpio_irq_type,
366}; 366};
367 367
368static struct irq_chip asic3_irq_chip = { 368static struct irq_chip asic3_irq_chip = {
369 .name = "ASIC3", 369 .name = "ASIC3",
370 .ack = asic3_mask_irq, 370 .irq_ack = asic3_mask_irq,
371 .mask = asic3_mask_irq, 371 .irq_mask = asic3_mask_irq,
372 .unmask = asic3_unmask_irq, 372 .irq_unmask = asic3_unmask_irq,
373}; 373};
374 374
375static int __init asic3_irq_probe(struct platform_device *pdev) 375static int __init asic3_irq_probe(struct platform_device *pdev)
@@ -635,7 +635,7 @@ static struct resource ds1wm_resources[] = {
635 }, 635 },
636 { 636 {
637 .start = ASIC3_IRQ_OWM, 637 .start = ASIC3_IRQ_OWM,
638 .start = ASIC3_IRQ_OWM, 638 .end = ASIC3_IRQ_OWM,
639 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, 639 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
640 }, 640 },
641}; 641};
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
new file mode 100644
index 000000000000..59ca6f151e78
--- /dev/null
+++ b/drivers/mfd/cs5535-mfd.c
@@ -0,0 +1,151 @@
1/*
2 * cs5535-mfd.c - core MFD driver for CS5535/CS5536 southbridges
3 *
4 * The CS5535 and CS5536 has an ISA bridge on the PCI bus that is
5 * used for accessing GPIOs, MFGPTs, ACPI, etc. Each subdevice has
6 * an IO range that's specified in a single BAR. The BAR order is
7 * hardcoded in the CS553x specifications.
8 *
9 * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/mfd/core.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30
31#define DRV_NAME "cs5535-mfd"
32
33enum cs5535_mfd_bars {
34 SMB_BAR = 0,
35 GPIO_BAR = 1,
36 MFGPT_BAR = 2,
37 PMS_BAR = 4,
38 ACPI_BAR = 5,
39 NR_BARS,
40};
41
42static __devinitdata struct resource cs5535_mfd_resources[NR_BARS];
43
44static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
45 {
46 .id = SMB_BAR,
47 .name = "cs5535-smb",
48 .num_resources = 1,
49 .resources = &cs5535_mfd_resources[SMB_BAR],
50 },
51 {
52 .id = GPIO_BAR,
53 .name = "cs5535-gpio",
54 .num_resources = 1,
55 .resources = &cs5535_mfd_resources[GPIO_BAR],
56 },
57 {
58 .id = MFGPT_BAR,
59 .name = "cs5535-mfgpt",
60 .num_resources = 1,
61 .resources = &cs5535_mfd_resources[MFGPT_BAR],
62 },
63 {
64 .id = PMS_BAR,
65 .name = "cs5535-pms",
66 .num_resources = 1,
67 .resources = &cs5535_mfd_resources[PMS_BAR],
68 },
69 {
70 .id = ACPI_BAR,
71 .name = "cs5535-acpi",
72 .num_resources = 1,
73 .resources = &cs5535_mfd_resources[ACPI_BAR],
74 },
75};
76
77static int __devinit cs5535_mfd_probe(struct pci_dev *pdev,
78 const struct pci_device_id *id)
79{
80 int err, i;
81
82 err = pci_enable_device(pdev);
83 if (err)
84 return err;
85
86 /* fill in IO range for each cell; subdrivers handle the region */
87 for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) {
88 int bar = cs5535_mfd_cells[i].id;
89 struct resource *r = &cs5535_mfd_resources[bar];
90
91 r->flags = IORESOURCE_IO;
92 r->start = pci_resource_start(pdev, bar);
93 r->end = pci_resource_end(pdev, bar);
94
95 /* id is used for temporarily storing BAR; unset it now */
96 cs5535_mfd_cells[i].id = 0;
97 }
98
99 err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells,
100 ARRAY_SIZE(cs5535_mfd_cells), NULL, 0);
101 if (err) {
102 dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
103 goto err_disable;
104 }
105
106 dev_info(&pdev->dev, "%zu devices registered.\n",
107 ARRAY_SIZE(cs5535_mfd_cells));
108
109 return 0;
110
111err_disable:
112 pci_disable_device(pdev);
113 return err;
114}
115
116static void __devexit cs5535_mfd_remove(struct pci_dev *pdev)
117{
118 mfd_remove_devices(&pdev->dev);
119 pci_disable_device(pdev);
120}
121
122static struct pci_device_id cs5535_mfd_pci_tbl[] = {
123 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
124 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
125 { 0, }
126};
127MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
128
129static struct pci_driver cs5535_mfd_drv = {
130 .name = DRV_NAME,
131 .id_table = cs5535_mfd_pci_tbl,
132 .probe = cs5535_mfd_probe,
133 .remove = __devexit_p(cs5535_mfd_remove),
134};
135
136static int __init cs5535_mfd_init(void)
137{
138 return pci_register_driver(&cs5535_mfd_drv);
139}
140
141static void __exit cs5535_mfd_exit(void)
142{
143 pci_unregister_driver(&cs5535_mfd_drv);
144}
145
146module_init(cs5535_mfd_init);
147module_exit(cs5535_mfd_exit);
148
149MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
150MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
151MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index c2b698d69a93..9e2d8dd5f9e5 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -144,26 +144,26 @@ int pcap_to_irq(struct pcap_chip *pcap, int irq)
144} 144}
145EXPORT_SYMBOL_GPL(pcap_to_irq); 145EXPORT_SYMBOL_GPL(pcap_to_irq);
146 146
147static void pcap_mask_irq(unsigned int irq) 147static void pcap_mask_irq(struct irq_data *d)
148{ 148{
149 struct pcap_chip *pcap = get_irq_chip_data(irq); 149 struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);
150 150
151 pcap->msr |= 1 << irq_to_pcap(pcap, irq); 151 pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
152 queue_work(pcap->workqueue, &pcap->msr_work); 152 queue_work(pcap->workqueue, &pcap->msr_work);
153} 153}
154 154
155static void pcap_unmask_irq(unsigned int irq) 155static void pcap_unmask_irq(struct irq_data *d)
156{ 156{
157 struct pcap_chip *pcap = get_irq_chip_data(irq); 157 struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);
158 158
159 pcap->msr &= ~(1 << irq_to_pcap(pcap, irq)); 159 pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
160 queue_work(pcap->workqueue, &pcap->msr_work); 160 queue_work(pcap->workqueue, &pcap->msr_work);
161} 161}
162 162
163static struct irq_chip pcap_irq_chip = { 163static struct irq_chip pcap_irq_chip = {
164 .name = "pcap", 164 .name = "pcap",
165 .mask = pcap_mask_irq, 165 .irq_mask = pcap_mask_irq,
166 .unmask = pcap_unmask_irq, 166 .irq_unmask = pcap_unmask_irq,
167}; 167};
168 168
169static void pcap_msr_work(struct work_struct *work) 169static void pcap_msr_work(struct work_struct *work)
@@ -199,8 +199,7 @@ static void pcap_isr_work(struct work_struct *work)
199 if (service & 1) { 199 if (service & 1) {
200 struct irq_desc *desc = irq_to_desc(irq); 200 struct irq_desc *desc = irq_to_desc(irq);
201 201
202 if (WARN(!desc, KERN_WARNING 202 if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq))
203 "Invalid PCAP IRQ %d\n", irq))
204 break; 203 break;
205 204
206 if (desc->status & IRQ_DISABLED) 205 if (desc->status & IRQ_DISABLED)
@@ -218,7 +217,7 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
218{ 217{
219 struct pcap_chip *pcap = get_irq_data(irq); 218 struct pcap_chip *pcap = get_irq_data(irq);
220 219
221 desc->chip->ack(irq); 220 desc->irq_data.chip->irq_ack(&desc->irq_data);
222 queue_work(pcap->workqueue, &pcap->isr_work); 221 queue_work(pcap->workqueue, &pcap->isr_work);
223 return; 222 return;
224} 223}
@@ -282,7 +281,7 @@ static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
282 mutex_lock(&pcap->adc_mutex); 281 mutex_lock(&pcap->adc_mutex);
283 req = pcap->adc_queue[pcap->adc_head]; 282 req = pcap->adc_queue[pcap->adc_head];
284 283
285 if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) { 284 if (WARN(!req, "adc irq without pending request\n")) {
286 mutex_unlock(&pcap->adc_mutex); 285 mutex_unlock(&pcap->adc_mutex);
287 return IRQ_HANDLED; 286 return IRQ_HANDLED;
288 } 287 }
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index d3e74f8585e0..d00b6d1a69e5 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -70,31 +70,32 @@ static inline void ack_irqs(struct egpio_info *ei)
70 ei->ack_write, ei->ack_register << ei->bus_shift); 70 ei->ack_write, ei->ack_register << ei->bus_shift);
71} 71}
72 72
73static void egpio_ack(unsigned int irq) 73static void egpio_ack(struct irq_data *data)
74{ 74{
75} 75}
76 76
77/* There does not appear to be a way to proactively mask interrupts 77/* There does not appear to be a way to proactively mask interrupts
78 * on the egpio chip itself. So, we simply ignore interrupts that 78 * on the egpio chip itself. So, we simply ignore interrupts that
79 * aren't desired. */ 79 * aren't desired. */
80static void egpio_mask(unsigned int irq) 80static void egpio_mask(struct irq_data *data)
81{ 81{
82 struct egpio_info *ei = get_irq_chip_data(irq); 82 struct egpio_info *ei = irq_data_get_irq_chip_data(data);
83 ei->irqs_enabled &= ~(1 << (irq - ei->irq_start)); 83 ei->irqs_enabled &= ~(1 << (data->irq - ei->irq_start));
84 pr_debug("EGPIO mask %d %04x\n", irq, ei->irqs_enabled); 84 pr_debug("EGPIO mask %d %04x\n", data->irq, ei->irqs_enabled);
85} 85}
86static void egpio_unmask(unsigned int irq) 86
87static void egpio_unmask(struct irq_data *data)
87{ 88{
88 struct egpio_info *ei = get_irq_chip_data(irq); 89 struct egpio_info *ei = irq_data_get_irq_chip_data(data);
89 ei->irqs_enabled |= 1 << (irq - ei->irq_start); 90 ei->irqs_enabled |= 1 << (data->irq - ei->irq_start);
90 pr_debug("EGPIO unmask %d %04x\n", irq, ei->irqs_enabled); 91 pr_debug("EGPIO unmask %d %04x\n", data->irq, ei->irqs_enabled);
91} 92}
92 93
93static struct irq_chip egpio_muxed_chip = { 94static struct irq_chip egpio_muxed_chip = {
94 .name = "htc-egpio", 95 .name = "htc-egpio",
95 .ack = egpio_ack, 96 .irq_ack = egpio_ack,
96 .mask = egpio_mask, 97 .irq_mask = egpio_mask,
97 .unmask = egpio_unmask, 98 .irq_unmask = egpio_unmask,
98}; 99};
99 100
100static void egpio_handler(unsigned int irq, struct irq_desc *desc) 101static void egpio_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 594c9a8e25e1..296ad1562f69 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -82,25 +82,25 @@ struct htcpld_data {
82/* There does not appear to be a way to proactively mask interrupts 82/* There does not appear to be a way to proactively mask interrupts
83 * on the htcpld chip itself. So, we simply ignore interrupts that 83 * on the htcpld chip itself. So, we simply ignore interrupts that
84 * aren't desired. */ 84 * aren't desired. */
85static void htcpld_mask(unsigned int irq) 85static void htcpld_mask(struct irq_data *data)
86{ 86{
87 struct htcpld_chip *chip = get_irq_chip_data(irq); 87 struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
88 chip->irqs_enabled &= ~(1 << (irq - chip->irq_start)); 88 chip->irqs_enabled &= ~(1 << (data->irq - chip->irq_start));
89 pr_debug("HTCPLD mask %d %04x\n", irq, chip->irqs_enabled); 89 pr_debug("HTCPLD mask %d %04x\n", data->irq, chip->irqs_enabled);
90} 90}
91static void htcpld_unmask(unsigned int irq) 91static void htcpld_unmask(struct irq_data *data)
92{ 92{
93 struct htcpld_chip *chip = get_irq_chip_data(irq); 93 struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
94 chip->irqs_enabled |= 1 << (irq - chip->irq_start); 94 chip->irqs_enabled |= 1 << (data->irq - chip->irq_start);
95 pr_debug("HTCPLD unmask %d %04x\n", irq, chip->irqs_enabled); 95 pr_debug("HTCPLD unmask %d %04x\n", data->irq, chip->irqs_enabled);
96} 96}
97 97
98static int htcpld_set_type(unsigned int irq, unsigned int flags) 98static int htcpld_set_type(struct irq_data *data, unsigned int flags)
99{ 99{
100 struct irq_desc *d = irq_to_desc(irq); 100 struct irq_desc *d = irq_to_desc(data->irq);
101 101
102 if (!d) { 102 if (!d) {
103 pr_err("HTCPLD invalid IRQ: %d\n", irq); 103 pr_err("HTCPLD invalid IRQ: %d\n", data->irq);
104 return -EINVAL; 104 return -EINVAL;
105 } 105 }
106 106
@@ -118,10 +118,10 @@ static int htcpld_set_type(unsigned int irq, unsigned int flags)
118} 118}
119 119
120static struct irq_chip htcpld_muxed_chip = { 120static struct irq_chip htcpld_muxed_chip = {
121 .name = "htcpld", 121 .name = "htcpld",
122 .mask = htcpld_mask, 122 .irq_mask = htcpld_mask,
123 .unmask = htcpld_unmask, 123 .irq_unmask = htcpld_unmask,
124 .set_type = htcpld_set_type, 124 .irq_set_type = htcpld_set_type,
125}; 125};
126 126
127/* To properly dispatch IRQ events, we need to read from the 127/* To properly dispatch IRQ events, we need to read from the
@@ -235,7 +235,7 @@ static irqreturn_t htcpld_handler(int irq, void *dev)
235 * and that work is scheduled in the set routine. The kernel can then run 235 * and that work is scheduled in the set routine. The kernel can then run
236 * the I2C functions, which will sleep, in process context. 236 * the I2C functions, which will sleep, in process context.
237 */ 237 */
238void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val) 238static void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
239{ 239{
240 struct i2c_client *client; 240 struct i2c_client *client;
241 struct htcpld_chip *chip_data; 241 struct htcpld_chip *chip_data;
@@ -259,7 +259,7 @@ void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
259 schedule_work(&(chip_data->set_val_work)); 259 schedule_work(&(chip_data->set_val_work));
260} 260}
261 261
262void htcpld_chip_set_ni(struct work_struct *work) 262static void htcpld_chip_set_ni(struct work_struct *work)
263{ 263{
264 struct htcpld_chip *chip_data; 264 struct htcpld_chip *chip_data;
265 struct i2c_client *client; 265 struct i2c_client *client;
@@ -269,7 +269,7 @@ void htcpld_chip_set_ni(struct work_struct *work)
269 i2c_smbus_read_byte_data(client, chip_data->cache_out); 269 i2c_smbus_read_byte_data(client, chip_data->cache_out);
270} 270}
271 271
272int htcpld_chip_get(struct gpio_chip *chip, unsigned offset) 272static int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
273{ 273{
274 struct htcpld_chip *chip_data; 274 struct htcpld_chip *chip_data;
275 int val = 0; 275 int val = 0;
@@ -316,7 +316,7 @@ static int htcpld_direction_input(struct gpio_chip *chip,
316 return (offset < chip->ngpio) ? 0 : -EINVAL; 316 return (offset < chip->ngpio) ? 0 : -EINVAL;
317} 317}
318 318
319int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset) 319static int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
320{ 320{
321 struct htcpld_chip *chip_data; 321 struct htcpld_chip *chip_data;
322 322
@@ -328,7 +328,7 @@ int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
328 return -EINVAL; 328 return -EINVAL;
329} 329}
330 330
331void htcpld_chip_reset(struct i2c_client *client) 331static void htcpld_chip_reset(struct i2c_client *client)
332{ 332{
333 struct htcpld_chip *chip_data = i2c_get_clientdata(client); 333 struct htcpld_chip *chip_data = i2c_get_clientdata(client);
334 if (!chip_data) 334 if (!chip_data)
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 9dd1b33f2275..0cc59795f600 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -84,31 +84,30 @@ static inline void jz4740_adc_irq_set_masked(struct jz4740_adc *adc, int irq,
84 spin_unlock_irqrestore(&adc->lock, flags); 84 spin_unlock_irqrestore(&adc->lock, flags);
85} 85}
86 86
87static void jz4740_adc_irq_mask(unsigned int irq) 87static void jz4740_adc_irq_mask(struct irq_data *data)
88{ 88{
89 struct jz4740_adc *adc = get_irq_chip_data(irq); 89 struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
90 jz4740_adc_irq_set_masked(adc, irq, true); 90 jz4740_adc_irq_set_masked(adc, data->irq, true);
91} 91}
92 92
93static void jz4740_adc_irq_unmask(unsigned int irq) 93static void jz4740_adc_irq_unmask(struct irq_data *data)
94{ 94{
95 struct jz4740_adc *adc = get_irq_chip_data(irq); 95 struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
96 jz4740_adc_irq_set_masked(adc, irq, false); 96 jz4740_adc_irq_set_masked(adc, data->irq, false);
97} 97}
98 98
99static void jz4740_adc_irq_ack(unsigned int irq) 99static void jz4740_adc_irq_ack(struct irq_data *data)
100{ 100{
101 struct jz4740_adc *adc = get_irq_chip_data(irq); 101 struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
102 102 unsigned int irq = data->irq - adc->irq_base;
103 irq -= adc->irq_base;
104 writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS); 103 writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
105} 104}
106 105
107static struct irq_chip jz4740_adc_irq_chip = { 106static struct irq_chip jz4740_adc_irq_chip = {
108 .name = "jz4740-adc", 107 .name = "jz4740-adc",
109 .mask = jz4740_adc_irq_mask, 108 .irq_mask = jz4740_adc_irq_mask,
110 .unmask = jz4740_adc_irq_unmask, 109 .irq_unmask = jz4740_adc_irq_unmask,
111 .ack = jz4740_adc_irq_ack, 110 .irq_ack = jz4740_adc_irq_ack,
112}; 111};
113 112
114static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) 113static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 44695f5a1800..0e998dc4e7d8 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -407,16 +407,16 @@ static irqreturn_t max8925_tsc_irq(int irq, void *data)
407 return IRQ_HANDLED; 407 return IRQ_HANDLED;
408} 408}
409 409
410static void max8925_irq_lock(unsigned int irq) 410static void max8925_irq_lock(struct irq_data *data)
411{ 411{
412 struct max8925_chip *chip = get_irq_chip_data(irq); 412 struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
413 413
414 mutex_lock(&chip->irq_lock); 414 mutex_lock(&chip->irq_lock);
415} 415}
416 416
417static void max8925_irq_sync_unlock(unsigned int irq) 417static void max8925_irq_sync_unlock(struct irq_data *data)
418{ 418{
419 struct max8925_chip *chip = get_irq_chip_data(irq); 419 struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
420 struct max8925_irq_data *irq_data; 420 struct max8925_irq_data *irq_data;
421 static unsigned char cache_chg[2] = {0xff, 0xff}; 421 static unsigned char cache_chg[2] = {0xff, 0xff};
422 static unsigned char cache_on[2] = {0xff, 0xff}; 422 static unsigned char cache_on[2] = {0xff, 0xff};
@@ -492,25 +492,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
492 mutex_unlock(&chip->irq_lock); 492 mutex_unlock(&chip->irq_lock);
493} 493}
494 494
495static void max8925_irq_enable(unsigned int irq) 495static void max8925_irq_enable(struct irq_data *data)
496{ 496{
497 struct max8925_chip *chip = get_irq_chip_data(irq); 497 struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
498 max8925_irqs[irq - chip->irq_base].enable 498 max8925_irqs[data->irq - chip->irq_base].enable
499 = max8925_irqs[irq - chip->irq_base].offs; 499 = max8925_irqs[data->irq - chip->irq_base].offs;
500} 500}
501 501
502static void max8925_irq_disable(unsigned int irq) 502static void max8925_irq_disable(struct irq_data *data)
503{ 503{
504 struct max8925_chip *chip = get_irq_chip_data(irq); 504 struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
505 max8925_irqs[irq - chip->irq_base].enable = 0; 505 max8925_irqs[data->irq - chip->irq_base].enable = 0;
506} 506}
507 507
508static struct irq_chip max8925_irq_chip = { 508static struct irq_chip max8925_irq_chip = {
509 .name = "max8925", 509 .name = "max8925",
510 .bus_lock = max8925_irq_lock, 510 .irq_bus_lock = max8925_irq_lock,
511 .bus_sync_unlock = max8925_irq_sync_unlock, 511 .irq_bus_sync_unlock = max8925_irq_sync_unlock,
512 .enable = max8925_irq_enable, 512 .irq_enable = max8925_irq_enable,
513 .disable = max8925_irq_disable, 513 .irq_disable = max8925_irq_disable,
514}; 514};
515 515
516static int max8925_irq_init(struct max8925_chip *chip, int irq, 516static int max8925_irq_init(struct max8925_chip *chip, int irq,
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 45bfe77b639b..3903e1fbb334 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -102,16 +102,16 @@ irq_to_max8998_irq(struct max8998_dev *max8998, int irq)
102 return &max8998_irqs[irq - max8998->irq_base]; 102 return &max8998_irqs[irq - max8998->irq_base];
103} 103}
104 104
105static void max8998_irq_lock(unsigned int irq) 105static void max8998_irq_lock(struct irq_data *data)
106{ 106{
107 struct max8998_dev *max8998 = get_irq_chip_data(irq); 107 struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
108 108
109 mutex_lock(&max8998->irqlock); 109 mutex_lock(&max8998->irqlock);
110} 110}
111 111
112static void max8998_irq_sync_unlock(unsigned int irq) 112static void max8998_irq_sync_unlock(struct irq_data *data)
113{ 113{
114 struct max8998_dev *max8998 = get_irq_chip_data(irq); 114 struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
115 int i; 115 int i;
116 116
117 for (i = 0; i < ARRAY_SIZE(max8998->irq_masks_cur); i++) { 117 for (i = 0; i < ARRAY_SIZE(max8998->irq_masks_cur); i++) {
@@ -129,28 +129,30 @@ static void max8998_irq_sync_unlock(unsigned int irq)
129 mutex_unlock(&max8998->irqlock); 129 mutex_unlock(&max8998->irqlock);
130} 130}
131 131
132static void max8998_irq_unmask(unsigned int irq) 132static void max8998_irq_unmask(struct irq_data *data)
133{ 133{
134 struct max8998_dev *max8998 = get_irq_chip_data(irq); 134 struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
135 struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq); 135 struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
136 data->irq);
136 137
137 max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; 138 max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
138} 139}
139 140
140static void max8998_irq_mask(unsigned int irq) 141static void max8998_irq_mask(struct irq_data *data)
141{ 142{
142 struct max8998_dev *max8998 = get_irq_chip_data(irq); 143 struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
143 struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq); 144 struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
145 data->irq);
144 146
145 max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; 147 max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
146} 148}
147 149
148static struct irq_chip max8998_irq_chip = { 150static struct irq_chip max8998_irq_chip = {
149 .name = "max8998", 151 .name = "max8998",
150 .bus_lock = max8998_irq_lock, 152 .irq_bus_lock = max8998_irq_lock,
151 .bus_sync_unlock = max8998_irq_sync_unlock, 153 .irq_bus_sync_unlock = max8998_irq_sync_unlock,
152 .mask = max8998_irq_mask, 154 .irq_mask = max8998_irq_mask,
153 .unmask = max8998_irq_unmask, 155 .irq_unmask = max8998_irq_unmask,
154}; 156};
155 157
156static irqreturn_t max8998_irq_thread(int irq, void *data) 158static irqreturn_t max8998_irq_thread(int irq, void *data)
@@ -181,6 +183,13 @@ static irqreturn_t max8998_irq_thread(int irq, void *data)
181 return IRQ_HANDLED; 183 return IRQ_HANDLED;
182} 184}
183 185
186int max8998_irq_resume(struct max8998_dev *max8998)
187{
188 if (max8998->irq && max8998->irq_base)
189 max8998_irq_thread(max8998->irq_base, max8998);
190 return 0;
191}
192
184int max8998_irq_init(struct max8998_dev *max8998) 193int max8998_irq_init(struct max8998_dev *max8998)
185{ 194{
186 int i; 195 int i;
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index bb9977bebe78..bbfe86732602 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -25,6 +25,8 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/interrupt.h>
29#include <linux/pm_runtime.h>
28#include <linux/mutex.h> 30#include <linux/mutex.h>
29#include <linux/mfd/core.h> 31#include <linux/mfd/core.h>
30#include <linux/mfd/max8998.h> 32#include <linux/mfd/max8998.h>
@@ -40,6 +42,14 @@ static struct mfd_cell max8998_devs[] = {
40 }, 42 },
41}; 43};
42 44
45static struct mfd_cell lp3974_devs[] = {
46 {
47 .name = "lp3974-pmic",
48 }, {
49 .name = "lp3974-rtc",
50 },
51};
52
43int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest) 53int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
44{ 54{
45 struct max8998_dev *max8998 = i2c_get_clientdata(i2c); 55 struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
@@ -135,6 +145,7 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
135 if (pdata) { 145 if (pdata) {
136 max8998->ono = pdata->ono; 146 max8998->ono = pdata->ono;
137 max8998->irq_base = pdata->irq_base; 147 max8998->irq_base = pdata->irq_base;
148 max8998->wakeup = pdata->wakeup;
138 } 149 }
139 mutex_init(&max8998->iolock); 150 mutex_init(&max8998->iolock);
140 151
@@ -143,9 +154,23 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
143 154
144 max8998_irq_init(max8998); 155 max8998_irq_init(max8998);
145 156
146 ret = mfd_add_devices(max8998->dev, -1, 157 pm_runtime_set_active(max8998->dev);
147 max8998_devs, ARRAY_SIZE(max8998_devs), 158
148 NULL, 0); 159 switch (id->driver_data) {
160 case TYPE_LP3974:
161 ret = mfd_add_devices(max8998->dev, -1,
162 lp3974_devs, ARRAY_SIZE(lp3974_devs),
163 NULL, 0);
164 break;
165 case TYPE_MAX8998:
166 ret = mfd_add_devices(max8998->dev, -1,
167 max8998_devs, ARRAY_SIZE(max8998_devs),
168 NULL, 0);
169 break;
170 default:
171 ret = -EINVAL;
172 }
173
149 if (ret < 0) 174 if (ret < 0)
150 goto err; 175 goto err;
151 176
@@ -178,10 +203,113 @@ static const struct i2c_device_id max8998_i2c_id[] = {
178}; 203};
179MODULE_DEVICE_TABLE(i2c, max8998_i2c_id); 204MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
180 205
206static int max8998_suspend(struct device *dev)
207{
208 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
209 struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
210
211 if (max8998->wakeup)
212 set_irq_wake(max8998->irq, 1);
213 return 0;
214}
215
216static int max8998_resume(struct device *dev)
217{
218 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
219 struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
220
221 if (max8998->wakeup)
222 set_irq_wake(max8998->irq, 0);
223 /*
224 * In LP3974, if IRQ registers are not "read & clear"
225 * when it's set during sleep, the interrupt becomes
226 * disabled.
227 */
228 return max8998_irq_resume(i2c_get_clientdata(i2c));
229}
230
231struct max8998_reg_dump {
232 u8 addr;
233 u8 val;
234};
235#define SAVE_ITEM(x) { .addr = (x), .val = 0x0, }
236struct max8998_reg_dump max8998_dump[] = {
237 SAVE_ITEM(MAX8998_REG_IRQM1),
238 SAVE_ITEM(MAX8998_REG_IRQM2),
239 SAVE_ITEM(MAX8998_REG_IRQM3),
240 SAVE_ITEM(MAX8998_REG_IRQM4),
241 SAVE_ITEM(MAX8998_REG_STATUSM1),
242 SAVE_ITEM(MAX8998_REG_STATUSM2),
243 SAVE_ITEM(MAX8998_REG_CHGR1),
244 SAVE_ITEM(MAX8998_REG_CHGR2),
245 SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1),
246 SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1),
247 SAVE_ITEM(MAX8998_REG_BUCK_ACTIVE_DISCHARGE3),
248 SAVE_ITEM(MAX8998_REG_ONOFF1),
249 SAVE_ITEM(MAX8998_REG_ONOFF2),
250 SAVE_ITEM(MAX8998_REG_ONOFF3),
251 SAVE_ITEM(MAX8998_REG_ONOFF4),
252 SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE1),
253 SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE2),
254 SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE3),
255 SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE4),
256 SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE1),
257 SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE2),
258 SAVE_ITEM(MAX8998_REG_LDO2_LDO3),
259 SAVE_ITEM(MAX8998_REG_LDO4),
260 SAVE_ITEM(MAX8998_REG_LDO5),
261 SAVE_ITEM(MAX8998_REG_LDO6),
262 SAVE_ITEM(MAX8998_REG_LDO7),
263 SAVE_ITEM(MAX8998_REG_LDO8_LDO9),
264 SAVE_ITEM(MAX8998_REG_LDO10_LDO11),
265 SAVE_ITEM(MAX8998_REG_LDO12),
266 SAVE_ITEM(MAX8998_REG_LDO13),
267 SAVE_ITEM(MAX8998_REG_LDO14),
268 SAVE_ITEM(MAX8998_REG_LDO15),
269 SAVE_ITEM(MAX8998_REG_LDO16),
270 SAVE_ITEM(MAX8998_REG_LDO17),
271 SAVE_ITEM(MAX8998_REG_BKCHR),
272 SAVE_ITEM(MAX8998_REG_LBCNFG1),
273 SAVE_ITEM(MAX8998_REG_LBCNFG2),
274};
275/* Save registers before hibernation */
276static int max8998_freeze(struct device *dev)
277{
278 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
279 int i;
280
281 for (i = 0; i < ARRAY_SIZE(max8998_dump); i++)
282 max8998_read_reg(i2c, max8998_dump[i].addr,
283 &max8998_dump[i].val);
284
285 return 0;
286}
287
288/* Restore registers after hibernation */
289static int max8998_restore(struct device *dev)
290{
291 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
292 int i;
293
294 for (i = 0; i < ARRAY_SIZE(max8998_dump); i++)
295 max8998_write_reg(i2c, max8998_dump[i].addr,
296 max8998_dump[i].val);
297
298 return 0;
299}
300
301const struct dev_pm_ops max8998_pm = {
302 .suspend = max8998_suspend,
303 .resume = max8998_resume,
304 .freeze = max8998_freeze,
305 .restore = max8998_restore,
306};
307
181static struct i2c_driver max8998_i2c_driver = { 308static struct i2c_driver max8998_i2c_driver = {
182 .driver = { 309 .driver = {
183 .name = "max8998", 310 .name = "max8998",
184 .owner = THIS_MODULE, 311 .owner = THIS_MODULE,
312 .pm = &max8998_pm,
185 }, 313 },
186 .probe = max8998_i2c_probe, 314 .probe = max8998_i2c_probe,
187 .remove = max8998_i2c_remove, 315 .remove = max8998_i2c_remove,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index a2ac2ed6d64c..b9fcaf0004da 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -749,7 +749,7 @@ static int mc13xxx_probe(struct spi_device *spi)
749 if (ret) { 749 if (ret) {
750err_mask: 750err_mask:
751err_revision: 751err_revision:
752 mutex_unlock(&mc13xxx->lock); 752 mc13xxx_unlock(mc13xxx);
753 dev_set_drvdata(&spi->dev, NULL); 753 dev_set_drvdata(&spi->dev, NULL);
754 kfree(mc13xxx); 754 kfree(mc13xxx);
755 return ret; 755 return ret;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index ec99f681e773..d83ad0f141af 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -15,6 +15,7 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/acpi.h> 16#include <linux/acpi.h>
17#include <linux/mfd/core.h> 17#include <linux/mfd/core.h>
18#include <linux/pm_runtime.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
19 20
20static int mfd_add_device(struct device *parent, int id, 21static int mfd_add_device(struct device *parent, int id,
@@ -82,6 +83,9 @@ static int mfd_add_device(struct device *parent, int id,
82 if (ret) 83 if (ret)
83 goto fail_res; 84 goto fail_res;
84 85
86 if (cell->pm_runtime_no_callbacks)
87 pm_runtime_no_callbacks(&pdev->dev);
88
85 kfree(res); 89 kfree(res);
86 90
87 return 0; 91 return 0;
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index bc9275c12133..5de3a760ea1e 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -26,7 +26,7 @@
26#include <linux/sm501-regs.h> 26#include <linux/sm501-regs.h>
27#include <linux/serial_8250.h> 27#include <linux/serial_8250.h>
28 28
29#include <asm/io.h> 29#include <linux/io.h>
30 30
31struct sm501_device { 31struct sm501_device {
32 struct list_head list; 32 struct list_head list;
@@ -745,11 +745,8 @@ static int sm501_register_device(struct sm501_devdata *sm,
745 int ret; 745 int ret;
746 746
747 for (ptr = 0; ptr < pdev->num_resources; ptr++) { 747 for (ptr = 0; ptr < pdev->num_resources; ptr++) {
748 printk(KERN_DEBUG "%s[%d] flags %08lx: %08llx..%08llx\n", 748 printk(KERN_DEBUG "%s[%d] %pR\n",
749 pdev->name, ptr, 749 pdev->name, ptr, &pdev->resource[ptr]);
750 pdev->resource[ptr].flags,
751 (unsigned long long)pdev->resource[ptr].start,
752 (unsigned long long)pdev->resource[ptr].end);
753 } 750 }
754 751
755 ret = platform_device_register(pdev); 752 ret = platform_device_register(pdev);
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index b11487f1e1cb..3e5732b58c49 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -699,16 +699,16 @@ static irqreturn_t stmpe_irq(int irq, void *data)
699 return IRQ_HANDLED; 699 return IRQ_HANDLED;
700} 700}
701 701
702static void stmpe_irq_lock(unsigned int irq) 702static void stmpe_irq_lock(struct irq_data *data)
703{ 703{
704 struct stmpe *stmpe = get_irq_chip_data(irq); 704 struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
705 705
706 mutex_lock(&stmpe->irq_lock); 706 mutex_lock(&stmpe->irq_lock);
707} 707}
708 708
709static void stmpe_irq_sync_unlock(unsigned int irq) 709static void stmpe_irq_sync_unlock(struct irq_data *data)
710{ 710{
711 struct stmpe *stmpe = get_irq_chip_data(irq); 711 struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
712 struct stmpe_variant_info *variant = stmpe->variant; 712 struct stmpe_variant_info *variant = stmpe->variant;
713 int num = DIV_ROUND_UP(variant->num_irqs, 8); 713 int num = DIV_ROUND_UP(variant->num_irqs, 8);
714 int i; 714 int i;
@@ -727,20 +727,20 @@ static void stmpe_irq_sync_unlock(unsigned int irq)
727 mutex_unlock(&stmpe->irq_lock); 727 mutex_unlock(&stmpe->irq_lock);
728} 728}
729 729
730static void stmpe_irq_mask(unsigned int irq) 730static void stmpe_irq_mask(struct irq_data *data)
731{ 731{
732 struct stmpe *stmpe = get_irq_chip_data(irq); 732 struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
733 int offset = irq - stmpe->irq_base; 733 int offset = data->irq - stmpe->irq_base;
734 int regoffset = offset / 8; 734 int regoffset = offset / 8;
735 int mask = 1 << (offset % 8); 735 int mask = 1 << (offset % 8);
736 736
737 stmpe->ier[regoffset] &= ~mask; 737 stmpe->ier[regoffset] &= ~mask;
738} 738}
739 739
740static void stmpe_irq_unmask(unsigned int irq) 740static void stmpe_irq_unmask(struct irq_data *data)
741{ 741{
742 struct stmpe *stmpe = get_irq_chip_data(irq); 742 struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
743 int offset = irq - stmpe->irq_base; 743 int offset = data->irq - stmpe->irq_base;
744 int regoffset = offset / 8; 744 int regoffset = offset / 8;
745 int mask = 1 << (offset % 8); 745 int mask = 1 << (offset % 8);
746 746
@@ -749,10 +749,10 @@ static void stmpe_irq_unmask(unsigned int irq)
749 749
750static struct irq_chip stmpe_irq_chip = { 750static struct irq_chip stmpe_irq_chip = {
751 .name = "stmpe", 751 .name = "stmpe",
752 .bus_lock = stmpe_irq_lock, 752 .irq_bus_lock = stmpe_irq_lock,
753 .bus_sync_unlock = stmpe_irq_sync_unlock, 753 .irq_bus_sync_unlock = stmpe_irq_sync_unlock,
754 .mask = stmpe_irq_mask, 754 .irq_mask = stmpe_irq_mask,
755 .unmask = stmpe_irq_unmask, 755 .irq_unmask = stmpe_irq_unmask,
756}; 756};
757 757
758static int __devinit stmpe_irq_init(struct stmpe *stmpe) 758static int __devinit stmpe_irq_init(struct stmpe *stmpe)
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 006c121f3f0d..9caeb4ac6ea6 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -199,37 +199,37 @@ static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
199 generic_handle_irq(irq_base + i); 199 generic_handle_irq(irq_base + i);
200} 200}
201 201
202static void t7l66xb_irq_mask(unsigned int irq) 202static void t7l66xb_irq_mask(struct irq_data *data)
203{ 203{
204 struct t7l66xb *t7l66xb = get_irq_chip_data(irq); 204 struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
205 unsigned long flags; 205 unsigned long flags;
206 u8 imr; 206 u8 imr;
207 207
208 spin_lock_irqsave(&t7l66xb->lock, flags); 208 spin_lock_irqsave(&t7l66xb->lock, flags);
209 imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); 209 imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
210 imr |= 1 << (irq - t7l66xb->irq_base); 210 imr |= 1 << (data->irq - t7l66xb->irq_base);
211 tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); 211 tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
212 spin_unlock_irqrestore(&t7l66xb->lock, flags); 212 spin_unlock_irqrestore(&t7l66xb->lock, flags);
213} 213}
214 214
215static void t7l66xb_irq_unmask(unsigned int irq) 215static void t7l66xb_irq_unmask(struct irq_data *data)
216{ 216{
217 struct t7l66xb *t7l66xb = get_irq_chip_data(irq); 217 struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
218 unsigned long flags; 218 unsigned long flags;
219 u8 imr; 219 u8 imr;
220 220
221 spin_lock_irqsave(&t7l66xb->lock, flags); 221 spin_lock_irqsave(&t7l66xb->lock, flags);
222 imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); 222 imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
223 imr &= ~(1 << (irq - t7l66xb->irq_base)); 223 imr &= ~(1 << (data->irq - t7l66xb->irq_base));
224 tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); 224 tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
225 spin_unlock_irqrestore(&t7l66xb->lock, flags); 225 spin_unlock_irqrestore(&t7l66xb->lock, flags);
226} 226}
227 227
228static struct irq_chip t7l66xb_chip = { 228static struct irq_chip t7l66xb_chip = {
229 .name = "t7l66xb", 229 .name = "t7l66xb",
230 .ack = t7l66xb_irq_mask, 230 .irq_ack = t7l66xb_irq_mask,
231 .mask = t7l66xb_irq_mask, 231 .irq_mask = t7l66xb_irq_mask,
232 .unmask = t7l66xb_irq_unmask, 232 .irq_unmask = t7l66xb_irq_unmask,
233}; 233};
234 234
235/*--------------------------------------------------------------------------*/ 235/*--------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 1ea80d8ad915..9a238633a54d 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -527,41 +527,41 @@ tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
527 } 527 }
528} 528}
529 529
530static void tc6393xb_irq_ack(unsigned int irq) 530static void tc6393xb_irq_ack(struct irq_data *data)
531{ 531{
532} 532}
533 533
534static void tc6393xb_irq_mask(unsigned int irq) 534static void tc6393xb_irq_mask(struct irq_data *data)
535{ 535{
536 struct tc6393xb *tc6393xb = get_irq_chip_data(irq); 536 struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
537 unsigned long flags; 537 unsigned long flags;
538 u8 imr; 538 u8 imr;
539 539
540 spin_lock_irqsave(&tc6393xb->lock, flags); 540 spin_lock_irqsave(&tc6393xb->lock, flags);
541 imr = tmio_ioread8(tc6393xb->scr + SCR_IMR); 541 imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
542 imr |= 1 << (irq - tc6393xb->irq_base); 542 imr |= 1 << (data->irq - tc6393xb->irq_base);
543 tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR); 543 tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
544 spin_unlock_irqrestore(&tc6393xb->lock, flags); 544 spin_unlock_irqrestore(&tc6393xb->lock, flags);
545} 545}
546 546
547static void tc6393xb_irq_unmask(unsigned int irq) 547static void tc6393xb_irq_unmask(struct irq_data *data)
548{ 548{
549 struct tc6393xb *tc6393xb = get_irq_chip_data(irq); 549 struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
550 unsigned long flags; 550 unsigned long flags;
551 u8 imr; 551 u8 imr;
552 552
553 spin_lock_irqsave(&tc6393xb->lock, flags); 553 spin_lock_irqsave(&tc6393xb->lock, flags);
554 imr = tmio_ioread8(tc6393xb->scr + SCR_IMR); 554 imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
555 imr &= ~(1 << (irq - tc6393xb->irq_base)); 555 imr &= ~(1 << (data->irq - tc6393xb->irq_base));
556 tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR); 556 tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
557 spin_unlock_irqrestore(&tc6393xb->lock, flags); 557 spin_unlock_irqrestore(&tc6393xb->lock, flags);
558} 558}
559 559
560static struct irq_chip tc6393xb_chip = { 560static struct irq_chip tc6393xb_chip = {
561 .name = "tc6393xb", 561 .name = "tc6393xb",
562 .ack = tc6393xb_irq_ack, 562 .irq_ack = tc6393xb_irq_ack,
563 .mask = tc6393xb_irq_mask, 563 .irq_mask = tc6393xb_irq_mask,
564 .unmask = tc6393xb_irq_unmask, 564 .irq_unmask = tc6393xb_irq_unmask,
565}; 565};
566 566
567static void tc6393xb_attach_irq(struct platform_device *dev) 567static void tc6393xb_attach_irq(struct platform_device *dev)
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 90187fe33e04..93d5fdf020c7 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -34,7 +34,7 @@
34 34
35#include <linux/i2c/tps65010.h> 35#include <linux/i2c/tps65010.h>
36 36
37#include <asm/gpio.h> 37#include <linux/gpio.h>
38 38
39 39
40/*-------------------------------------------------------------------------*/ 40/*-------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b4931ab34929..627cf577b16d 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -46,8 +46,6 @@
46 46
47/* device id */ 47/* device id */
48#define TPS6586X_VERSIONCRC 0xcd 48#define TPS6586X_VERSIONCRC 0xcd
49#define TPS658621A_VERSIONCRC 0x15
50#define TPS658621C_VERSIONCRC 0x2c
51 49
52struct tps6586x_irq_data { 50struct tps6586x_irq_data {
53 u8 mask_reg; 51 u8 mask_reg;
@@ -325,37 +323,37 @@ static int tps6586x_remove_subdevs(struct tps6586x *tps6586x)
325 return device_for_each_child(tps6586x->dev, NULL, __remove_subdev); 323 return device_for_each_child(tps6586x->dev, NULL, __remove_subdev);
326} 324}
327 325
328static void tps6586x_irq_lock(unsigned int irq) 326static void tps6586x_irq_lock(struct irq_data *data)
329{ 327{
330 struct tps6586x *tps6586x = get_irq_chip_data(irq); 328 struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data);
331 329
332 mutex_lock(&tps6586x->irq_lock); 330 mutex_lock(&tps6586x->irq_lock);
333} 331}
334 332
335static void tps6586x_irq_enable(unsigned int irq) 333static void tps6586x_irq_enable(struct irq_data *irq_data)
336{ 334{
337 struct tps6586x *tps6586x = get_irq_chip_data(irq); 335 struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
338 unsigned int __irq = irq - tps6586x->irq_base; 336 unsigned int __irq = irq_data->irq - tps6586x->irq_base;
339 const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq]; 337 const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
340 338
341 tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask; 339 tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
342 tps6586x->irq_en |= (1 << __irq); 340 tps6586x->irq_en |= (1 << __irq);
343} 341}
344 342
345static void tps6586x_irq_disable(unsigned int irq) 343static void tps6586x_irq_disable(struct irq_data *irq_data)
346{ 344{
347 struct tps6586x *tps6586x = get_irq_chip_data(irq); 345 struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
348 346
349 unsigned int __irq = irq - tps6586x->irq_base; 347 unsigned int __irq = irq_data->irq - tps6586x->irq_base;
350 const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq]; 348 const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
351 349
352 tps6586x->mask_reg[data->mask_reg] |= data->mask_mask; 350 tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
353 tps6586x->irq_en &= ~(1 << __irq); 351 tps6586x->irq_en &= ~(1 << __irq);
354} 352}
355 353
356static void tps6586x_irq_sync_unlock(unsigned int irq) 354static void tps6586x_irq_sync_unlock(struct irq_data *data)
357{ 355{
358 struct tps6586x *tps6586x = get_irq_chip_data(irq); 356 struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data);
359 int i; 357 int i;
360 358
361 for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) { 359 for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) {
@@ -421,10 +419,10 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
421 tps6586x->irq_base = irq_base; 419 tps6586x->irq_base = irq_base;
422 420
423 tps6586x->irq_chip.name = "tps6586x"; 421 tps6586x->irq_chip.name = "tps6586x";
424 tps6586x->irq_chip.enable = tps6586x_irq_enable; 422 tps6586x->irq_chip.irq_enable = tps6586x_irq_enable;
425 tps6586x->irq_chip.disable = tps6586x_irq_disable; 423 tps6586x->irq_chip.irq_disable = tps6586x_irq_disable;
426 tps6586x->irq_chip.bus_lock = tps6586x_irq_lock; 424 tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock;
427 tps6586x->irq_chip.bus_sync_unlock = tps6586x_irq_sync_unlock; 425 tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock;
428 426
429 for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) { 427 for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
430 int __irq = i + tps6586x->irq_base; 428 int __irq = i + tps6586x->irq_base;
@@ -498,11 +496,7 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
498 return -EIO; 496 return -EIO;
499 } 497 }
500 498
501 if ((ret != TPS658621A_VERSIONCRC) && 499 dev_info(&client->dev, "VERSIONCRC is %02x\n", ret);
502 (ret != TPS658621C_VERSIONCRC)) {
503 dev_err(&client->dev, "Unsupported chip ID: %x\n", ret);
504 return -ENODEV;
505 }
506 500
507 tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL); 501 tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL);
508 if (tps6586x == NULL) 502 if (tps6586x == NULL)
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 12abd5b924b3..a35fa7dcbf53 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1003,7 +1003,7 @@ static int twl_remove(struct i2c_client *client)
1003} 1003}
1004 1004
1005/* NOTE: this driver only handles a single twl4030/tps659x0 chip */ 1005/* NOTE: this driver only handles a single twl4030/tps659x0 chip */
1006static int __init 1006static int __devinit
1007twl_probe(struct i2c_client *client, const struct i2c_device_id *id) 1007twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1008{ 1008{
1009 int status; 1009 int status;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d3a1478004b..63a30e88908f 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -599,38 +599,38 @@ static void twl4030_sih_do_edge(struct work_struct *work)
599 * completion, potentially including some re-ordering, of these requests. 599 * completion, potentially including some re-ordering, of these requests.
600 */ 600 */
601 601
602static void twl4030_sih_mask(unsigned irq) 602static void twl4030_sih_mask(struct irq_data *data)
603{ 603{
604 struct sih_agent *sih = get_irq_chip_data(irq); 604 struct sih_agent *sih = irq_data_get_irq_chip_data(data);
605 unsigned long flags; 605 unsigned long flags;
606 606
607 spin_lock_irqsave(&sih_agent_lock, flags); 607 spin_lock_irqsave(&sih_agent_lock, flags);
608 sih->imr |= BIT(irq - sih->irq_base); 608 sih->imr |= BIT(data->irq - sih->irq_base);
609 sih->imr_change_pending = true; 609 sih->imr_change_pending = true;
610 queue_work(wq, &sih->mask_work); 610 queue_work(wq, &sih->mask_work);
611 spin_unlock_irqrestore(&sih_agent_lock, flags); 611 spin_unlock_irqrestore(&sih_agent_lock, flags);
612} 612}
613 613
614static void twl4030_sih_unmask(unsigned irq) 614static void twl4030_sih_unmask(struct irq_data *data)
615{ 615{
616 struct sih_agent *sih = get_irq_chip_data(irq); 616 struct sih_agent *sih = irq_data_get_irq_chip_data(data);
617 unsigned long flags; 617 unsigned long flags;
618 618
619 spin_lock_irqsave(&sih_agent_lock, flags); 619 spin_lock_irqsave(&sih_agent_lock, flags);
620 sih->imr &= ~BIT(irq - sih->irq_base); 620 sih->imr &= ~BIT(data->irq - sih->irq_base);
621 sih->imr_change_pending = true; 621 sih->imr_change_pending = true;
622 queue_work(wq, &sih->mask_work); 622 queue_work(wq, &sih->mask_work);
623 spin_unlock_irqrestore(&sih_agent_lock, flags); 623 spin_unlock_irqrestore(&sih_agent_lock, flags);
624} 624}
625 625
626static int twl4030_sih_set_type(unsigned irq, unsigned trigger) 626static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger)
627{ 627{
628 struct sih_agent *sih = get_irq_chip_data(irq); 628 struct sih_agent *sih = irq_data_get_irq_chip_data(data);
629 struct irq_desc *desc = irq_to_desc(irq); 629 struct irq_desc *desc = irq_to_desc(data->irq);
630 unsigned long flags; 630 unsigned long flags;
631 631
632 if (!desc) { 632 if (!desc) {
633 pr_err("twl4030: Invalid IRQ: %d\n", irq); 633 pr_err("twl4030: Invalid IRQ: %d\n", data->irq);
634 return -EINVAL; 634 return -EINVAL;
635 } 635 }
636 636
@@ -641,7 +641,7 @@ static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
641 if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) { 641 if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
642 desc->status &= ~IRQ_TYPE_SENSE_MASK; 642 desc->status &= ~IRQ_TYPE_SENSE_MASK;
643 desc->status |= trigger; 643 desc->status |= trigger;
644 sih->edge_change |= BIT(irq - sih->irq_base); 644 sih->edge_change |= BIT(data->irq - sih->irq_base);
645 queue_work(wq, &sih->edge_work); 645 queue_work(wq, &sih->edge_work);
646 } 646 }
647 spin_unlock_irqrestore(&sih_agent_lock, flags); 647 spin_unlock_irqrestore(&sih_agent_lock, flags);
@@ -650,9 +650,9 @@ static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
650 650
651static struct irq_chip twl4030_sih_irq_chip = { 651static struct irq_chip twl4030_sih_irq_chip = {
652 .name = "twl4030", 652 .name = "twl4030",
653 .mask = twl4030_sih_mask, 653 .irq_mask = twl4030_sih_mask,
654 .unmask = twl4030_sih_unmask, 654 .irq_unmask = twl4030_sih_unmask,
655 .set_type = twl4030_sih_set_type, 655 .irq_set_type = twl4030_sih_set_type,
656}; 656};
657 657
658/*----------------------------------------------------------------------*/ 658/*----------------------------------------------------------------------*/
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 06c8955907e9..4082ed73613f 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -332,7 +332,7 @@ int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
332 */ 332 */
333 twl6030_irq_chip = dummy_irq_chip; 333 twl6030_irq_chip = dummy_irq_chip;
334 twl6030_irq_chip.name = "twl6030"; 334 twl6030_irq_chip.name = "twl6030";
335 twl6030_irq_chip.set_type = NULL; 335 twl6030_irq_chip.irq_set_type = NULL;
336 336
337 for (i = irq_base; i < irq_end; i++) { 337 for (i = irq_base; i < irq_end; i++) {
338 set_irq_chip_and_handler(i, &twl6030_irq_chip, 338 set_irq_chip_and_handler(i, &twl6030_irq_chip,
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index ebb059765edd..348052aa5dbf 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -112,7 +112,7 @@ out:
112 return ret; 112 return ret;
113} 113}
114 114
115static void vx855_remove(struct pci_dev *pdev) 115static void __devexit vx855_remove(struct pci_dev *pdev)
116{ 116{
117 mfd_remove_devices(&pdev->dev); 117 mfd_remove_devices(&pdev->dev);
118 pci_disable_device(pdev); 118 pci_disable_device(pdev);
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 76cadcf3b1fe..3fe9a58fe6c7 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1541,6 +1541,12 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1541 dev_info(wm831x->dev, "WM8325 revision %c\n", 'A' + rev); 1541 dev_info(wm831x->dev, "WM8325 revision %c\n", 'A' + rev);
1542 break; 1542 break;
1543 1543
1544 case WM8326:
1545 parent = WM8326;
1546 wm831x->num_gpio = 12;
1547 dev_info(wm831x->dev, "WM8326 revision %c\n", 'A' + rev);
1548 break;
1549
1544 default: 1550 default:
1545 dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret); 1551 dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret);
1546 ret = -EINVAL; 1552 ret = -EINVAL;
@@ -1610,18 +1616,9 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1610 break; 1616 break;
1611 1617
1612 case WM8320: 1618 case WM8320:
1613 ret = mfd_add_devices(wm831x->dev, -1,
1614 wm8320_devs, ARRAY_SIZE(wm8320_devs),
1615 NULL, 0);
1616 break;
1617
1618 case WM8321: 1619 case WM8321:
1619 ret = mfd_add_devices(wm831x->dev, -1,
1620 wm8320_devs, ARRAY_SIZE(wm8320_devs),
1621 NULL, 0);
1622 break;
1623
1624 case WM8325: 1620 case WM8325:
1621 case WM8326:
1625 ret = mfd_add_devices(wm831x->dev, -1, 1622 ret = mfd_add_devices(wm831x->dev, -1,
1626 wm8320_devs, ARRAY_SIZE(wm8320_devs), 1623 wm8320_devs, ARRAY_SIZE(wm8320_devs),
1627 NULL, wm831x->irq_base); 1624 NULL, wm831x->irq_base);
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 156b19859e81..3853fa8e7cc2 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -94,9 +94,9 @@ static int wm831x_i2c_remove(struct i2c_client *i2c)
94 return 0; 94 return 0;
95} 95}
96 96
97static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg) 97static int wm831x_i2c_suspend(struct device *dev)
98{ 98{
99 struct wm831x *wm831x = i2c_get_clientdata(i2c); 99 struct wm831x *wm831x = dev_get_drvdata(dev);
100 100
101 return wm831x_device_suspend(wm831x); 101 return wm831x_device_suspend(wm831x);
102} 102}
@@ -108,19 +108,23 @@ static const struct i2c_device_id wm831x_i2c_id[] = {
108 { "wm8320", WM8320 }, 108 { "wm8320", WM8320 },
109 { "wm8321", WM8321 }, 109 { "wm8321", WM8321 },
110 { "wm8325", WM8325 }, 110 { "wm8325", WM8325 },
111 { "wm8326", WM8326 },
111 { } 112 { }
112}; 113};
113MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id); 114MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
114 115
116static const struct dev_pm_ops wm831x_pm_ops = {
117 .suspend = wm831x_i2c_suspend,
118};
115 119
116static struct i2c_driver wm831x_i2c_driver = { 120static struct i2c_driver wm831x_i2c_driver = {
117 .driver = { 121 .driver = {
118 .name = "wm831x", 122 .name = "wm831x",
119 .owner = THIS_MODULE, 123 .owner = THIS_MODULE,
124 .pm = &wm831x_pm_ops,
120 }, 125 },
121 .probe = wm831x_i2c_probe, 126 .probe = wm831x_i2c_probe,
122 .remove = wm831x_i2c_remove, 127 .remove = wm831x_i2c_remove,
123 .suspend = wm831x_i2c_suspend,
124 .id_table = wm831x_i2c_id, 128 .id_table = wm831x_i2c_id,
125}; 129};
126 130
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 294183b6260b..f7192d438aab 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -345,16 +345,16 @@ static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
345 return &wm831x_irqs[irq - wm831x->irq_base]; 345 return &wm831x_irqs[irq - wm831x->irq_base];
346} 346}
347 347
348static void wm831x_irq_lock(unsigned int irq) 348static void wm831x_irq_lock(struct irq_data *data)
349{ 349{
350 struct wm831x *wm831x = get_irq_chip_data(irq); 350 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
351 351
352 mutex_lock(&wm831x->irq_lock); 352 mutex_lock(&wm831x->irq_lock);
353} 353}
354 354
355static void wm831x_irq_sync_unlock(unsigned int irq) 355static void wm831x_irq_sync_unlock(struct irq_data *data)
356{ 356{
357 struct wm831x *wm831x = get_irq_chip_data(irq); 357 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
358 int i; 358 int i;
359 359
360 for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { 360 for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
@@ -371,28 +371,30 @@ static void wm831x_irq_sync_unlock(unsigned int irq)
371 mutex_unlock(&wm831x->irq_lock); 371 mutex_unlock(&wm831x->irq_lock);
372} 372}
373 373
374static void wm831x_irq_unmask(unsigned int irq) 374static void wm831x_irq_unmask(struct irq_data *data)
375{ 375{
376 struct wm831x *wm831x = get_irq_chip_data(irq); 376 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
377 struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq); 377 struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
378 data->irq);
378 379
379 wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; 380 wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
380} 381}
381 382
382static void wm831x_irq_mask(unsigned int irq) 383static void wm831x_irq_mask(struct irq_data *data)
383{ 384{
384 struct wm831x *wm831x = get_irq_chip_data(irq); 385 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
385 struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq); 386 struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
387 data->irq);
386 388
387 wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; 389 wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
388} 390}
389 391
390static int wm831x_irq_set_type(unsigned int irq, unsigned int type) 392static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
391{ 393{
392 struct wm831x *wm831x = get_irq_chip_data(irq); 394 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
393 int val; 395 int val, irq;
394 396
395 irq = irq - wm831x->irq_base; 397 irq = data->irq - wm831x->irq_base;
396 398
397 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { 399 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
398 /* Ignore internal-only IRQs */ 400 /* Ignore internal-only IRQs */
@@ -421,12 +423,12 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
421} 423}
422 424
423static struct irq_chip wm831x_irq_chip = { 425static struct irq_chip wm831x_irq_chip = {
424 .name = "wm831x", 426 .name = "wm831x",
425 .bus_lock = wm831x_irq_lock, 427 .irq_bus_lock = wm831x_irq_lock,
426 .bus_sync_unlock = wm831x_irq_sync_unlock, 428 .irq_bus_sync_unlock = wm831x_irq_sync_unlock,
427 .mask = wm831x_irq_mask, 429 .irq_mask = wm831x_irq_mask,
428 .unmask = wm831x_irq_unmask, 430 .irq_unmask = wm831x_irq_unmask,
429 .set_type = wm831x_irq_set_type, 431 .irq_set_type = wm831x_irq_set_type,
430}; 432};
431 433
432/* The processing of the primary interrupt occurs in a thread so that 434/* The processing of the primary interrupt occurs in a thread so that
@@ -515,6 +517,17 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
515 return 0; 517 return 0;
516 } 518 }
517 519
520 /* Try to flag /IRQ as a wake source; there are a number of
521 * unconditional wake sources in the PMIC so this isn't
522 * conditional but we don't actually care *too* much if it
523 * fails.
524 */
525 ret = enable_irq_wake(irq);
526 if (ret != 0) {
527 dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
528 ret);
529 }
530
518 wm831x->irq = irq; 531 wm831x->irq = irq;
519 wm831x->irq_base = pdata->irq_base; 532 wm831x->irq_base = pdata->irq_base;
520 533
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 2789b151b0f9..0a8f772be88c 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -81,6 +81,8 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi)
81 type = WM8321; 81 type = WM8321;
82 else if (strcmp(spi->modalias, "wm8325") == 0) 82 else if (strcmp(spi->modalias, "wm8325") == 0)
83 type = WM8325; 83 type = WM8325;
84 else if (strcmp(spi->modalias, "wm8326") == 0)
85 type = WM8326;
84 else { 86 else {
85 dev_err(&spi->dev, "Unknown device type\n"); 87 dev_err(&spi->dev, "Unknown device type\n");
86 return -EINVAL; 88 return -EINVAL;
@@ -184,6 +186,17 @@ static struct spi_driver wm8325_spi_driver = {
184 .suspend = wm831x_spi_suspend, 186 .suspend = wm831x_spi_suspend,
185}; 187};
186 188
189static struct spi_driver wm8326_spi_driver = {
190 .driver = {
191 .name = "wm8326",
192 .bus = &spi_bus_type,
193 .owner = THIS_MODULE,
194 },
195 .probe = wm831x_spi_probe,
196 .remove = __devexit_p(wm831x_spi_remove),
197 .suspend = wm831x_spi_suspend,
198};
199
187static int __init wm831x_spi_init(void) 200static int __init wm831x_spi_init(void)
188{ 201{
189 int ret; 202 int ret;
@@ -212,12 +225,17 @@ static int __init wm831x_spi_init(void)
212 if (ret != 0) 225 if (ret != 0)
213 pr_err("Failed to register WM8325 SPI driver: %d\n", ret); 226 pr_err("Failed to register WM8325 SPI driver: %d\n", ret);
214 227
228 ret = spi_register_driver(&wm8326_spi_driver);
229 if (ret != 0)
230 pr_err("Failed to register WM8326 SPI driver: %d\n", ret);
231
215 return 0; 232 return 0;
216} 233}
217subsys_initcall(wm831x_spi_init); 234subsys_initcall(wm831x_spi_init);
218 235
219static void __exit wm831x_spi_exit(void) 236static void __exit wm831x_spi_exit(void)
220{ 237{
238 spi_unregister_driver(&wm8326_spi_driver);
221 spi_unregister_driver(&wm8325_spi_driver); 239 spi_unregister_driver(&wm8325_spi_driver);
222 spi_unregister_driver(&wm8321_spi_driver); 240 spi_unregister_driver(&wm8321_spi_driver);
223 spi_unregister_driver(&wm8320_spi_driver); 241 spi_unregister_driver(&wm8320_spi_driver);
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index f56c9adf9493..5839966ebd85 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -417,16 +417,16 @@ static irqreturn_t wm8350_irq(int irq, void *irq_data)
417 return IRQ_HANDLED; 417 return IRQ_HANDLED;
418} 418}
419 419
420static void wm8350_irq_lock(unsigned int irq) 420static void wm8350_irq_lock(struct irq_data *data)
421{ 421{
422 struct wm8350 *wm8350 = get_irq_chip_data(irq); 422 struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
423 423
424 mutex_lock(&wm8350->irq_lock); 424 mutex_lock(&wm8350->irq_lock);
425} 425}
426 426
427static void wm8350_irq_sync_unlock(unsigned int irq) 427static void wm8350_irq_sync_unlock(struct irq_data *data)
428{ 428{
429 struct wm8350 *wm8350 = get_irq_chip_data(irq); 429 struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
430 int i; 430 int i;
431 431
432 for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) { 432 for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) {
@@ -442,28 +442,30 @@ static void wm8350_irq_sync_unlock(unsigned int irq)
442 mutex_unlock(&wm8350->irq_lock); 442 mutex_unlock(&wm8350->irq_lock);
443} 443}
444 444
445static void wm8350_irq_enable(unsigned int irq) 445static void wm8350_irq_enable(struct irq_data *data)
446{ 446{
447 struct wm8350 *wm8350 = get_irq_chip_data(irq); 447 struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
448 struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq); 448 struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350,
449 data->irq);
449 450
450 wm8350->irq_masks[irq_data->reg] &= ~irq_data->mask; 451 wm8350->irq_masks[irq_data->reg] &= ~irq_data->mask;
451} 452}
452 453
453static void wm8350_irq_disable(unsigned int irq) 454static void wm8350_irq_disable(struct irq_data *data)
454{ 455{
455 struct wm8350 *wm8350 = get_irq_chip_data(irq); 456 struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
456 struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq); 457 struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350,
458 data->irq);
457 459
458 wm8350->irq_masks[irq_data->reg] |= irq_data->mask; 460 wm8350->irq_masks[irq_data->reg] |= irq_data->mask;
459} 461}
460 462
461static struct irq_chip wm8350_irq_chip = { 463static struct irq_chip wm8350_irq_chip = {
462 .name = "wm8350", 464 .name = "wm8350",
463 .bus_lock = wm8350_irq_lock, 465 .irq_bus_lock = wm8350_irq_lock,
464 .bus_sync_unlock = wm8350_irq_sync_unlock, 466 .irq_bus_sync_unlock = wm8350_irq_sync_unlock,
465 .disable = wm8350_irq_disable, 467 .irq_disable = wm8350_irq_disable,
466 .enable = wm8350_irq_enable, 468 .irq_enable = wm8350_irq_enable,
467}; 469};
468 470
469int wm8350_irq_init(struct wm8350 *wm8350, int irq, 471int wm8350_irq_init(struct wm8350 *wm8350, int irq,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 8d221ba5e38d..41233c7fa581 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -18,6 +18,7 @@
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/mfd/core.h> 20#include <linux/mfd/core.h>
21#include <linux/pm_runtime.h>
21#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
22#include <linux/regulator/machine.h> 23#include <linux/regulator/machine.h>
23 24
@@ -169,8 +170,16 @@ out:
169EXPORT_SYMBOL_GPL(wm8994_set_bits); 170EXPORT_SYMBOL_GPL(wm8994_set_bits);
170 171
171static struct mfd_cell wm8994_regulator_devs[] = { 172static struct mfd_cell wm8994_regulator_devs[] = {
172 { .name = "wm8994-ldo", .id = 1 }, 173 {
173 { .name = "wm8994-ldo", .id = 2 }, 174 .name = "wm8994-ldo",
175 .id = 1,
176 .pm_runtime_no_callbacks = true,
177 },
178 {
179 .name = "wm8994-ldo",
180 .id = 2,
181 .pm_runtime_no_callbacks = true,
182 },
174}; 183};
175 184
176static struct resource wm8994_codec_resources[] = { 185static struct resource wm8994_codec_resources[] = {
@@ -200,6 +209,7 @@ static struct mfd_cell wm8994_devs[] = {
200 .name = "wm8994-gpio", 209 .name = "wm8994-gpio",
201 .num_resources = ARRAY_SIZE(wm8994_gpio_resources), 210 .num_resources = ARRAY_SIZE(wm8994_gpio_resources),
202 .resources = wm8994_gpio_resources, 211 .resources = wm8994_gpio_resources,
212 .pm_runtime_no_callbacks = true,
203 }, 213 },
204}; 214};
205 215
@@ -231,7 +241,7 @@ static const char *wm8958_main_supplies[] = {
231}; 241};
232 242
233#ifdef CONFIG_PM 243#ifdef CONFIG_PM
234static int wm8994_device_suspend(struct device *dev) 244static int wm8994_suspend(struct device *dev)
235{ 245{
236 struct wm8994 *wm8994 = dev_get_drvdata(dev); 246 struct wm8994 *wm8994 = dev_get_drvdata(dev);
237 int ret; 247 int ret;
@@ -261,7 +271,7 @@ static int wm8994_device_suspend(struct device *dev)
261 return 0; 271 return 0;
262} 272}
263 273
264static int wm8994_device_resume(struct device *dev) 274static int wm8994_resume(struct device *dev)
265{ 275{
266 struct wm8994 *wm8994 = dev_get_drvdata(dev); 276 struct wm8994 *wm8994 = dev_get_drvdata(dev);
267 int ret; 277 int ret;
@@ -471,6 +481,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
471 goto err_irq; 481 goto err_irq;
472 } 482 }
473 483
484 pm_runtime_enable(wm8994->dev);
485 pm_runtime_resume(wm8994->dev);
486
474 return 0; 487 return 0;
475 488
476err_irq: 489err_irq:
@@ -490,6 +503,7 @@ err:
490 503
491static void wm8994_device_exit(struct wm8994 *wm8994) 504static void wm8994_device_exit(struct wm8994 *wm8994)
492{ 505{
506 pm_runtime_disable(wm8994->dev);
493 mfd_remove_devices(wm8994->dev); 507 mfd_remove_devices(wm8994->dev);
494 wm8994_irq_exit(wm8994); 508 wm8994_irq_exit(wm8994);
495 regulator_bulk_disable(wm8994->num_supplies, 509 regulator_bulk_disable(wm8994->num_supplies,
@@ -573,21 +587,6 @@ static int wm8994_i2c_remove(struct i2c_client *i2c)
573 return 0; 587 return 0;
574} 588}
575 589
576#ifdef CONFIG_PM
577static int wm8994_i2c_suspend(struct i2c_client *i2c, pm_message_t state)
578{
579 return wm8994_device_suspend(&i2c->dev);
580}
581
582static int wm8994_i2c_resume(struct i2c_client *i2c)
583{
584 return wm8994_device_resume(&i2c->dev);
585}
586#else
587#define wm8994_i2c_suspend NULL
588#define wm8994_i2c_resume NULL
589#endif
590
591static const struct i2c_device_id wm8994_i2c_id[] = { 590static const struct i2c_device_id wm8994_i2c_id[] = {
592 { "wm8994", WM8994 }, 591 { "wm8994", WM8994 },
593 { "wm8958", WM8958 }, 592 { "wm8958", WM8958 },
@@ -595,15 +594,16 @@ static const struct i2c_device_id wm8994_i2c_id[] = {
595}; 594};
596MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id); 595MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);
597 596
597UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume, NULL);
598
598static struct i2c_driver wm8994_i2c_driver = { 599static struct i2c_driver wm8994_i2c_driver = {
599 .driver = { 600 .driver = {
600 .name = "wm8994", 601 .name = "wm8994",
601 .owner = THIS_MODULE, 602 .owner = THIS_MODULE,
603 .pm = &wm8994_pm_ops,
602 }, 604 },
603 .probe = wm8994_i2c_probe, 605 .probe = wm8994_i2c_probe,
604 .remove = wm8994_i2c_remove, 606 .remove = wm8994_i2c_remove,
605 .suspend = wm8994_i2c_suspend,
606 .resume = wm8994_i2c_resume,
607 .id_table = wm8994_i2c_id, 607 .id_table = wm8994_i2c_id,
608}; 608};
609 609
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 8400eb1ee5db..29e8faf9c01c 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -156,16 +156,16 @@ static inline struct wm8994_irq_data *irq_to_wm8994_irq(struct wm8994 *wm8994,
156 return &wm8994_irqs[irq - wm8994->irq_base]; 156 return &wm8994_irqs[irq - wm8994->irq_base];
157} 157}
158 158
159static void wm8994_irq_lock(unsigned int irq) 159static void wm8994_irq_lock(struct irq_data *data)
160{ 160{
161 struct wm8994 *wm8994 = get_irq_chip_data(irq); 161 struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
162 162
163 mutex_lock(&wm8994->irq_lock); 163 mutex_lock(&wm8994->irq_lock);
164} 164}
165 165
166static void wm8994_irq_sync_unlock(unsigned int irq) 166static void wm8994_irq_sync_unlock(struct irq_data *data)
167{ 167{
168 struct wm8994 *wm8994 = get_irq_chip_data(irq); 168 struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
169 int i; 169 int i;
170 170
171 for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) { 171 for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
@@ -182,28 +182,30 @@ static void wm8994_irq_sync_unlock(unsigned int irq)
182 mutex_unlock(&wm8994->irq_lock); 182 mutex_unlock(&wm8994->irq_lock);
183} 183}
184 184
185static void wm8994_irq_unmask(unsigned int irq) 185static void wm8994_irq_unmask(struct irq_data *data)
186{ 186{
187 struct wm8994 *wm8994 = get_irq_chip_data(irq); 187 struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
188 struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq); 188 struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
189 data->irq);
189 190
190 wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; 191 wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
191} 192}
192 193
193static void wm8994_irq_mask(unsigned int irq) 194static void wm8994_irq_mask(struct irq_data *data)
194{ 195{
195 struct wm8994 *wm8994 = get_irq_chip_data(irq); 196 struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
196 struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq); 197 struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
198 data->irq);
197 199
198 wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; 200 wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
199} 201}
200 202
201static struct irq_chip wm8994_irq_chip = { 203static struct irq_chip wm8994_irq_chip = {
202 .name = "wm8994", 204 .name = "wm8994",
203 .bus_lock = wm8994_irq_lock, 205 .irq_bus_lock = wm8994_irq_lock,
204 .bus_sync_unlock = wm8994_irq_sync_unlock, 206 .irq_bus_sync_unlock = wm8994_irq_sync_unlock,
205 .mask = wm8994_irq_mask, 207 .irq_mask = wm8994_irq_mask,
206 .unmask = wm8994_irq_unmask, 208 .irq_unmask = wm8994_irq_unmask,
207}; 209};
208 210
209/* The processing of the primary interrupt occurs in a thread so that 211/* The processing of the primary interrupt occurs in a thread so that
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1e1a4be8eb6c..cc8e49db45fe 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -64,7 +64,7 @@ config ATMEL_PWM
64 64
65config AB8500_PWM 65config AB8500_PWM
66 bool "AB8500 PWM support" 66 bool "AB8500 PWM support"
67 depends on AB8500_CORE 67 depends on AB8500_CORE && ARCH_U8500
68 select HAVE_PWM 68 select HAVE_PWM
69 help 69 help
70 This driver exports functions to enable/disble/config/free Pulse 70 This driver exports functions to enable/disble/config/free Pulse
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index 6f6218061b0d..d02d302ee6d5 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -16,12 +16,11 @@
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/pci.h> 19#include <linux/platform_device.h>
20#include <linux/cs5535.h> 20#include <linux/cs5535.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22
23#define DRV_NAME "cs5535-mfgpt" 23#define DRV_NAME "cs5535-mfgpt"
24#define MFGPT_BAR 2
25 24
26static int mfgpt_reset_timers; 25static int mfgpt_reset_timers;
27module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644); 26module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
@@ -37,7 +36,7 @@ static struct cs5535_mfgpt_chip {
37 DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS); 36 DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
38 resource_size_t base; 37 resource_size_t base;
39 38
40 struct pci_dev *pdev; 39 struct platform_device *pdev;
41 spinlock_t lock; 40 spinlock_t lock;
42 int initialized; 41 int initialized;
43} cs5535_mfgpt_chip; 42} cs5535_mfgpt_chip;
@@ -290,10 +289,10 @@ static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
290 return timers; 289 return timers;
291} 290}
292 291
293static int __init cs5535_mfgpt_probe(struct pci_dev *pdev, 292static int __devinit cs5535_mfgpt_probe(struct platform_device *pdev)
294 const struct pci_device_id *pci_id)
295{ 293{
296 int err, t; 294 struct resource *res;
295 int err = -EIO, t;
297 296
298 /* There are two ways to get the MFGPT base address; one is by 297 /* There are two ways to get the MFGPT base address; one is by
299 * fetching it from MSR_LBAR_MFGPT, the other is by reading the 298 * fetching it from MSR_LBAR_MFGPT, the other is by reading the
@@ -302,29 +301,27 @@ static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
302 * it turns out to be unreliable in the face of crappy BIOSes, we 301 * it turns out to be unreliable in the face of crappy BIOSes, we
303 * can always go back to using MSRs.. */ 302 * can always go back to using MSRs.. */
304 303
305 err = pci_enable_device_io(pdev); 304 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
306 if (err) { 305 if (!res) {
307 dev_err(&pdev->dev, "can't enable device IO\n"); 306 dev_err(&pdev->dev, "can't fetch device resource info\n");
308 goto done; 307 goto done;
309 } 308 }
310 309
311 err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME); 310 if (!request_region(res->start, resource_size(res), pdev->name)) {
312 if (err) { 311 dev_err(&pdev->dev, "can't request region\n");
313 dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR);
314 goto done; 312 goto done;
315 } 313 }
316 314
317 /* set up the driver-specific struct */ 315 /* set up the driver-specific struct */
318 cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR); 316 cs5535_mfgpt_chip.base = res->start;
319 cs5535_mfgpt_chip.pdev = pdev; 317 cs5535_mfgpt_chip.pdev = pdev;
320 spin_lock_init(&cs5535_mfgpt_chip.lock); 318 spin_lock_init(&cs5535_mfgpt_chip.lock);
321 319
322 dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR, 320 dev_info(&pdev->dev, "reserved resource region %pR\n", res);
323 (unsigned long long) cs5535_mfgpt_chip.base);
324 321
325 /* detect the available timers */ 322 /* detect the available timers */
326 t = scan_timers(&cs5535_mfgpt_chip); 323 t = scan_timers(&cs5535_mfgpt_chip);
327 dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t); 324 dev_info(&pdev->dev, "%d MFGPT timers available\n", t);
328 cs5535_mfgpt_chip.initialized = 1; 325 cs5535_mfgpt_chip.initialized = 1;
329 return 0; 326 return 0;
330 327
@@ -332,47 +329,18 @@ done:
332 return err; 329 return err;
333} 330}
334 331
335static struct pci_device_id cs5535_mfgpt_pci_tbl[] = { 332static struct platform_driver cs5535_mfgpt_drv = {
336 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) }, 333 .driver = {
337 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) }, 334 .name = DRV_NAME,
338 { 0, }, 335 .owner = THIS_MODULE,
336 },
337 .probe = cs5535_mfgpt_probe,
339}; 338};
340MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl);
341 339
342/*
343 * Just like with the cs5535-gpio driver, we can't use the standard PCI driver
344 * registration stuff. It only allows only one driver to bind to each PCI
345 * device, and we want the GPIO and MFGPT drivers to be able to share a PCI
346 * device. Instead, we manually scan for the PCI device, request a single
347 * region, and keep track of the devices that we're using.
348 */
349
350static int __init cs5535_mfgpt_scan_pci(void)
351{
352 struct pci_dev *pdev;
353 int err = -ENODEV;
354 int i;
355
356 for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
357 pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
358 cs5535_mfgpt_pci_tbl[i].device, NULL);
359 if (pdev) {
360 err = cs5535_mfgpt_probe(pdev,
361 &cs5535_mfgpt_pci_tbl[i]);
362 if (err)
363 pci_dev_put(pdev);
364
365 /* we only support a single CS5535/6 southbridge */
366 break;
367 }
368 }
369
370 return err;
371}
372 340
373static int __init cs5535_mfgpt_init(void) 341static int __init cs5535_mfgpt_init(void)
374{ 342{
375 return cs5535_mfgpt_scan_pci(); 343 return platform_driver_register(&cs5535_mfgpt_drv);
376} 344}
377 345
378module_init(cs5535_mfgpt_init); 346module_init(cs5535_mfgpt_init);
@@ -380,3 +348,4 @@ module_init(cs5535_mfgpt_init);
380MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); 348MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
381MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver"); 349MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
382MODULE_LICENSE("GPL"); 350MODULE_LICENSE("GPL");
351MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index bac7d62866b7..0371bf502249 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -462,7 +462,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)
462 goto out; 462 goto out;
463 } 463 }
464 464
465 mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev); 465 mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
466 if (!mmc) { 466 if (!mmc) {
467 ret = -ENOMEM; 467 ret = -ENOMEM;
468 goto out; 468 goto out;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b3a0ab0e4c2b..74218ad677e4 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/mmc/host.h> 16#include <linux/mmc/host.h>
17#include <linux/err.h>
17#include <linux/io.h> 18#include <linux/io.h>
18#include <linux/irq.h> 19#include <linux/irq.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
@@ -827,8 +828,8 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)
827 } 828 }
828 829
829 host->clk = clk_get(&pdev->dev, "mmc"); 830 host->clk = clk_get(&pdev->dev, "mmc");
830 if (!host->clk) { 831 if (IS_ERR(host->clk)) {
831 ret = -ENOENT; 832 ret = PTR_ERR(host->clk);
832 dev_err(&pdev->dev, "Failed to get mmc clock\n"); 833 dev_err(&pdev->dev, "Failed to get mmc clock\n");
833 goto err_free_host; 834 goto err_free_host;
834 } 835 }
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 563022825667..2d6de3e03e2d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -14,6 +14,7 @@
14#include <linux/ioport.h> 14#include <linux/ioport.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/highmem.h> 20#include <linux/highmem.h>
@@ -46,10 +47,6 @@ static unsigned int fmax = 515633;
46 * is asserted (likewise for RX) 47 * is asserted (likewise for RX)
47 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY 48 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
48 * is asserted (likewise for RX) 49 * is asserted (likewise for RX)
49 * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
50 * and will not work at all.
51 * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
52 * using DMA.
53 * @sdio: variant supports SDIO 50 * @sdio: variant supports SDIO
54 * @st_clkdiv: true if using a ST-specific clock divider algorithm 51 * @st_clkdiv: true if using a ST-specific clock divider algorithm
55 */ 52 */
@@ -59,8 +56,6 @@ struct variant_data {
59 unsigned int datalength_bits; 56 unsigned int datalength_bits;
60 unsigned int fifosize; 57 unsigned int fifosize;
61 unsigned int fifohalfsize; 58 unsigned int fifohalfsize;
62 bool broken_blockend;
63 bool broken_blockend_dma;
64 bool sdio; 59 bool sdio;
65 bool st_clkdiv; 60 bool st_clkdiv;
66}; 61};
@@ -76,7 +71,6 @@ static struct variant_data variant_u300 = {
76 .fifohalfsize = 8 * 4, 71 .fifohalfsize = 8 * 4,
77 .clkreg_enable = 1 << 13, /* HWFCEN */ 72 .clkreg_enable = 1 << 13, /* HWFCEN */
78 .datalength_bits = 16, 73 .datalength_bits = 16,
79 .broken_blockend_dma = true,
80 .sdio = true, 74 .sdio = true,
81}; 75};
82 76
@@ -86,7 +80,6 @@ static struct variant_data variant_ux500 = {
86 .clkreg = MCI_CLK_ENABLE, 80 .clkreg = MCI_CLK_ENABLE,
87 .clkreg_enable = 1 << 14, /* HWFCEN */ 81 .clkreg_enable = 1 << 14, /* HWFCEN */
88 .datalength_bits = 24, 82 .datalength_bits = 24,
89 .broken_blockend = true,
90 .sdio = true, 83 .sdio = true,
91 .st_clkdiv = true, 84 .st_clkdiv = true,
92}; 85};
@@ -210,8 +203,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
210 host->data = data; 203 host->data = data;
211 host->size = data->blksz * data->blocks; 204 host->size = data->blksz * data->blocks;
212 host->data_xfered = 0; 205 host->data_xfered = 0;
213 host->blockend = false;
214 host->dataend = false;
215 206
216 mmci_init_sg(host, data); 207 mmci_init_sg(host, data);
217 208
@@ -288,21 +279,26 @@ static void
288mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 279mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
289 unsigned int status) 280 unsigned int status)
290{ 281{
291 struct variant_data *variant = host->variant;
292
293 /* First check for errors */ 282 /* First check for errors */
294 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 283 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
284 u32 remain, success;
285
286 /* Calculate how far we are into the transfer */
287 remain = readl(host->base + MMCIDATACNT);
288 success = data->blksz * data->blocks - remain;
289
295 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); 290 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
296 if (status & MCI_DATACRCFAIL) 291 if (status & MCI_DATACRCFAIL) {
292 /* Last block was not successful */
293 host->data_xfered = round_down(success - 1, data->blksz);
297 data->error = -EILSEQ; 294 data->error = -EILSEQ;
298 else if (status & MCI_DATATIMEOUT) 295 } else if (status & MCI_DATATIMEOUT) {
296 host->data_xfered = round_down(success, data->blksz);
299 data->error = -ETIMEDOUT; 297 data->error = -ETIMEDOUT;
300 else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) 298 } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
299 host->data_xfered = round_down(success, data->blksz);
301 data->error = -EIO; 300 data->error = -EIO;
302 301 }
303 /* Force-complete the transaction */
304 host->blockend = true;
305 host->dataend = true;
306 302
307 /* 303 /*
308 * We hit an error condition. Ensure that any data 304 * We hit an error condition. Ensure that any data
@@ -321,61 +317,14 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
321 } 317 }
322 } 318 }
323 319
324 /* 320 if (status & MCI_DATABLOCKEND)
325 * On ARM variants in PIO mode, MCI_DATABLOCKEND 321 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
326 * is always sent first, and we increase the
327 * transfered number of bytes for that IRQ. Then
328 * MCI_DATAEND follows and we conclude the transaction.
329 *
330 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
331 * doesn't seem to immediately clear from the status,
332 * so we can't use it keep count when only one irq is
333 * used because the irq will hit for other reasons, and
334 * then the flag is still up. So we use the MCI_DATAEND
335 * IRQ at the end of the entire transfer because
336 * MCI_DATABLOCKEND is broken.
337 *
338 * In the U300, the IRQs can arrive out-of-order,
339 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
340 * so for this case we use the flags "blockend" and
341 * "dataend" to make sure both IRQs have arrived before
342 * concluding the transaction. (This does not apply
343 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
344 * at all.) In DMA mode it suffers from the same problem
345 * as the Ux500.
346 */
347 if (status & MCI_DATABLOCKEND) {
348 /*
349 * Just being a little over-cautious, we do not
350 * use this progressive update if the hardware blockend
351 * flag is unreliable: since it can stay high between
352 * IRQs it will corrupt the transfer counter.
353 */
354 if (!variant->broken_blockend)
355 host->data_xfered += data->blksz;
356 host->blockend = true;
357 }
358
359 if (status & MCI_DATAEND)
360 host->dataend = true;
361 322
362 /* 323 if (status & MCI_DATAEND || data->error) {
363 * On variants with broken blockend we shall only wait for dataend,
364 * on others we must sync with the blockend signal since they can
365 * appear out-of-order.
366 */
367 if (host->dataend && (host->blockend || variant->broken_blockend)) {
368 mmci_stop_data(host); 324 mmci_stop_data(host);
369 325
370 /* Reset these flags */ 326 if (!data->error)
371 host->blockend = false; 327 /* The error clause is handled above, success! */
372 host->dataend = false;
373
374 /*
375 * Variants with broken blockend flags need to handle the
376 * end of the entire transfer here.
377 */
378 if (variant->broken_blockend && !data->error)
379 host->data_xfered += data->blksz * data->blocks; 328 host->data_xfered += data->blksz * data->blocks;
380 329
381 if (!data->stop) { 330 if (!data->stop) {
@@ -394,15 +343,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
394 343
395 host->cmd = NULL; 344 host->cmd = NULL;
396 345
397 cmd->resp[0] = readl(base + MMCIRESPONSE0);
398 cmd->resp[1] = readl(base + MMCIRESPONSE1);
399 cmd->resp[2] = readl(base + MMCIRESPONSE2);
400 cmd->resp[3] = readl(base + MMCIRESPONSE3);
401
402 if (status & MCI_CMDTIMEOUT) { 346 if (status & MCI_CMDTIMEOUT) {
403 cmd->error = -ETIMEDOUT; 347 cmd->error = -ETIMEDOUT;
404 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 348 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
405 cmd->error = -EILSEQ; 349 cmd->error = -EILSEQ;
350 } else {
351 cmd->resp[0] = readl(base + MMCIRESPONSE0);
352 cmd->resp[1] = readl(base + MMCIRESPONSE1);
353 cmd->resp[2] = readl(base + MMCIRESPONSE2);
354 cmd->resp[3] = readl(base + MMCIRESPONSE3);
406 } 355 }
407 356
408 if (!cmd->data || cmd->error) { 357 if (!cmd->data || cmd->error) {
@@ -770,7 +719,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
770 struct variant_data *variant = id->data; 719 struct variant_data *variant = id->data;
771 struct mmci_host *host; 720 struct mmci_host *host;
772 struct mmc_host *mmc; 721 struct mmc_host *mmc;
773 unsigned int mask;
774 int ret; 722 int ret;
775 723
776 /* must have platform data */ 724 /* must have platform data */
@@ -951,12 +899,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
951 goto irq0_free; 899 goto irq0_free;
952 } 900 }
953 901
954 mask = MCI_IRQENABLE; 902 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
955 /* Don't use the datablockend flag if it's broken */
956 if (variant->broken_blockend)
957 mask &= ~MCI_DATABLOCKEND;
958
959 writel(mask, host->base + MMCIMASK0);
960 903
961 amba_set_drvdata(dev, mmc); 904 amba_set_drvdata(dev, mmc);
962 905
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index df06f01aac89..c1df7b82d36c 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -137,7 +137,7 @@
137#define MCI_IRQENABLE \ 137#define MCI_IRQENABLE \
138 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \ 138 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
139 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ 139 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
140 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK) 140 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
141 141
142/* These interrupts are directed to IRQ1 when two IRQ lines are available */ 142/* These interrupts are directed to IRQ1 when two IRQ lines are available */
143#define MCI_IRQ1MASK \ 143#define MCI_IRQ1MASK \
@@ -177,9 +177,6 @@ struct mmci_host {
177 struct timer_list timer; 177 struct timer_list timer;
178 unsigned int oldstat; 178 unsigned int oldstat;
179 179
180 bool blockend;
181 bool dataend;
182
183 /* pio stuff */ 180 /* pio stuff */
184 struct sg_mapping_iter sg_miter; 181 struct sg_mapping_iter sg_miter;
185 unsigned int size; 182 unsigned int size;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 5decfd0bd61d..153ab977a013 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -383,14 +383,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
383 host->curr.user_pages = 0; 383 host->curr.user_pages = 0;
384 384
385 box = &nc->cmd[0]; 385 box = &nc->cmd[0];
386 for (i = 0; i < host->dma.num_ents; i++) {
387 box->cmd = CMD_MODE_BOX;
388 386
389 /* Initialize sg dma address */ 387 /* location of command block must be 64 bit aligned */
390 sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg)) 388 BUG_ON(host->dma.cmd_busaddr & 0x07);
391 + sg->offset;
392 389
393 if (i == (host->dma.num_ents - 1)) 390 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
391 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
392 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
393 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
394
395 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
396 host->dma.num_ents, host->dma.dir);
397 if (n == 0) {
398 printk(KERN_ERR "%s: Unable to map in all sg elements\n",
399 mmc_hostname(host->mmc));
400 host->dma.sg = NULL;
401 host->dma.num_ents = 0;
402 return -ENOMEM;
403 }
404
405 for_each_sg(host->dma.sg, sg, n, i) {
406
407 box->cmd = CMD_MODE_BOX;
408
409 if (i == n - 1)
394 box->cmd |= CMD_LC; 410 box->cmd |= CMD_LC;
395 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ? 411 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
396 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 : 412 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -418,27 +434,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
418 box->cmd |= CMD_DST_CRCI(crci); 434 box->cmd |= CMD_DST_CRCI(crci);
419 } 435 }
420 box++; 436 box++;
421 sg++;
422 }
423
424 /* location of command block must be 64 bit aligned */
425 BUG_ON(host->dma.cmd_busaddr & 0x07);
426
427 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
428 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
429 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
430 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
431
432 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
433 host->dma.num_ents, host->dma.dir);
434/* dsb inside dma_map_sg will write nc out to mem as well */
435
436 if (n != host->dma.num_ents) {
437 printk(KERN_ERR "%s: Unable to map in all sg elements\n",
438 mmc_hostname(host->mmc));
439 host->dma.sg = NULL;
440 host->dma.num_ents = 0;
441 return -ENOMEM;
442 } 437 }
443 438
444 return 0; 439 return 0;
@@ -1331,9 +1326,6 @@ msmsdcc_probe(struct platform_device *pdev)
1331 if (host->timer.function) 1326 if (host->timer.function)
1332 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc)); 1327 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
1333 1328
1334#if BUSCLK_PWRSAVE
1335 msmsdcc_disable_clocks(host, 1);
1336#endif
1337 return 0; 1329 return 0;
1338 cmd_irq_free: 1330 cmd_irq_free:
1339 free_irq(cmd_irqres->start, host); 1331 free_irq(cmd_irqres->start, host);
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index fa19d849a920..dd84124f4209 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -13,6 +13,7 @@
13 * your option) any later version. 13 * your option) any later version.
14 */ 14 */
15 15
16#include <linux/err.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/io.h> 19#include <linux/io.h>
@@ -20,8 +21,12 @@
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/of.h> 22#include <linux/of.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
23#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
27#ifdef CONFIG_PPC
24#include <asm/machdep.h> 28#include <asm/machdep.h>
29#endif
25#include "sdhci-of.h" 30#include "sdhci-of.h"
26#include "sdhci.h" 31#include "sdhci.h"
27 32
@@ -112,7 +117,11 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
112 return true; 117 return true;
113 118
114 /* Old device trees don't have the wp-inverted property. */ 119 /* Old device trees don't have the wp-inverted property. */
120#ifdef CONFIG_PPC
115 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); 121 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
122#else
123 return false;
124#endif
116} 125}
117 126
118static int __devinit sdhci_of_probe(struct platform_device *ofdev, 127static int __devinit sdhci_of_probe(struct platform_device *ofdev,
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 17203586305c..5309ab95aada 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -277,10 +277,43 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
277 host->clock = clock; 277 host->clock = clock;
278} 278}
279 279
280/**
281 * sdhci_s3c_platform_8bit_width - support 8bit buswidth
282 * @host: The SDHCI host being queried
283 * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
284 *
285 * We have 8-bit width support but is not a v3 controller.
286 * So we add platform_8bit_width() and support 8bit width.
287 */
288static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
289{
290 u8 ctrl;
291
292 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
293
294 switch (width) {
295 case MMC_BUS_WIDTH_8:
296 ctrl |= SDHCI_CTRL_8BITBUS;
297 ctrl &= ~SDHCI_CTRL_4BITBUS;
298 break;
299 case MMC_BUS_WIDTH_4:
300 ctrl |= SDHCI_CTRL_4BITBUS;
301 ctrl &= ~SDHCI_CTRL_8BITBUS;
302 break;
303 default:
304 break;
305 }
306
307 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
308
309 return 0;
310}
311
280static struct sdhci_ops sdhci_s3c_ops = { 312static struct sdhci_ops sdhci_s3c_ops = {
281 .get_max_clock = sdhci_s3c_get_max_clk, 313 .get_max_clock = sdhci_s3c_get_max_clk,
282 .set_clock = sdhci_s3c_set_clock, 314 .set_clock = sdhci_s3c_set_clock,
283 .get_min_clock = sdhci_s3c_get_min_clock, 315 .get_min_clock = sdhci_s3c_get_min_clock,
316 .platform_8bit_width = sdhci_s3c_platform_8bit_width,
284}; 317};
285 318
286static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 319static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
@@ -473,6 +506,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
473 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 506 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
474 host->mmc->caps = MMC_CAP_NONREMOVABLE; 507 host->mmc->caps = MMC_CAP_NONREMOVABLE;
475 508
509 if (pdata->host_caps)
510 host->mmc->caps |= pdata->host_caps;
511
476 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | 512 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
477 SDHCI_QUIRK_32BIT_DMA_SIZE); 513 SDHCI_QUIRK_32BIT_DMA_SIZE);
478 514
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f8f65df9b017..f08f944ac53c 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/usb.h> 20#include <linux/usb.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/usb.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
25#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index b1f768917395..77414702cb00 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -53,9 +53,10 @@ config MTD_PARTITIONS
53 devices. Partitioning on NFTL 'devices' is a different - that's the 53 devices. Partitioning on NFTL 'devices' is a different - that's the
54 'normal' form of partitioning used on a block device. 54 'normal' form of partitioning used on a block device.
55 55
56if MTD_PARTITIONS
57
56config MTD_REDBOOT_PARTS 58config MTD_REDBOOT_PARTS
57 tristate "RedBoot partition table parsing" 59 tristate "RedBoot partition table parsing"
58 depends on MTD_PARTITIONS
59 ---help--- 60 ---help---
60 RedBoot is a ROM monitor and bootloader which deals with multiple 61 RedBoot is a ROM monitor and bootloader which deals with multiple
61 'images' in flash devices by putting a table one of the erase 62 'images' in flash devices by putting a table one of the erase
@@ -72,9 +73,10 @@ config MTD_REDBOOT_PARTS
72 SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for 73 SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
73 example. 74 example.
74 75
76if MTD_REDBOOT_PARTS
77
75config MTD_REDBOOT_DIRECTORY_BLOCK 78config MTD_REDBOOT_DIRECTORY_BLOCK
76 int "Location of RedBoot partition table" 79 int "Location of RedBoot partition table"
77 depends on MTD_REDBOOT_PARTS
78 default "-1" 80 default "-1"
79 ---help--- 81 ---help---
80 This option is the Linux counterpart to the 82 This option is the Linux counterpart to the
@@ -91,18 +93,18 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
91 93
92config MTD_REDBOOT_PARTS_UNALLOCATED 94config MTD_REDBOOT_PARTS_UNALLOCATED
93 bool "Include unallocated flash regions" 95 bool "Include unallocated flash regions"
94 depends on MTD_REDBOOT_PARTS
95 help 96 help
96 If you need to register each unallocated flash region as a MTD 97 If you need to register each unallocated flash region as a MTD
97 'partition', enable this option. 98 'partition', enable this option.
98 99
99config MTD_REDBOOT_PARTS_READONLY 100config MTD_REDBOOT_PARTS_READONLY
100 bool "Force read-only for RedBoot system images" 101 bool "Force read-only for RedBoot system images"
101 depends on MTD_REDBOOT_PARTS
102 help 102 help
103 If you need to force read-only for 'RedBoot', 'RedBoot Config' and 103 If you need to force read-only for 'RedBoot', 'RedBoot Config' and
104 'FIS directory' images, enable this option. 104 'FIS directory' images, enable this option.
105 105
106endif # MTD_REDBOOT_PARTS
107
106config MTD_CMDLINE_PARTS 108config MTD_CMDLINE_PARTS
107 bool "Command line partition table parsing" 109 bool "Command line partition table parsing"
108 depends on MTD_PARTITIONS = "y" && MTD = "y" 110 depends on MTD_PARTITIONS = "y" && MTD = "y"
@@ -142,7 +144,7 @@ config MTD_CMDLINE_PARTS
142 144
143config MTD_AFS_PARTS 145config MTD_AFS_PARTS
144 tristate "ARM Firmware Suite partition parsing" 146 tristate "ARM Firmware Suite partition parsing"
145 depends on ARM && MTD_PARTITIONS 147 depends on ARM
146 ---help--- 148 ---help---
147 The ARM Firmware Suite allows the user to divide flash devices into 149 The ARM Firmware Suite allows the user to divide flash devices into
148 multiple 'images'. Each such image has a header containing its name 150 multiple 'images'. Each such image has a header containing its name
@@ -158,8 +160,8 @@ config MTD_AFS_PARTS
158 example. 160 example.
159 161
160config MTD_OF_PARTS 162config MTD_OF_PARTS
161 tristate "Flash partition map based on OF description" 163 def_bool y
162 depends on OF && MTD_PARTITIONS 164 depends on OF
163 help 165 help
164 This provides a partition parsing function which derives 166 This provides a partition parsing function which derives
165 the partition map from the children of the flash node, 167 the partition map from the children of the flash node,
@@ -167,10 +169,11 @@ config MTD_OF_PARTS
167 169
168config MTD_AR7_PARTS 170config MTD_AR7_PARTS
169 tristate "TI AR7 partitioning support" 171 tristate "TI AR7 partitioning support"
170 depends on MTD_PARTITIONS
171 ---help--- 172 ---help---
172 TI AR7 partitioning support 173 TI AR7 partitioning support
173 174
175endif # MTD_PARTITIONS
176
174comment "User Modules And Translation Layers" 177comment "User Modules And Translation Layers"
175 178
176config MTD_CHAR 179config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 760abc533395..d4e7f25b1ebb 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -6,13 +6,13 @@
6obj-$(CONFIG_MTD) += mtd.o 6obj-$(CONFIG_MTD) += mtd.o
7mtd-y := mtdcore.o mtdsuper.o 7mtd-y := mtdcore.o mtdsuper.o
8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o 8mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
9mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o
9 10
10obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o 11obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
11obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 12obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
12obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 13obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
13obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 14obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
14obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o 15obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
15obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
16 16
17# 'Users' - code which presents functionality to userspace. 17# 'Users' - code which presents functionality to userspace.
18obj-$(CONFIG_MTD_CHAR) += mtdchar.o 18obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index ad9268b44416..a8c3e1c9b02a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -162,7 +162,7 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
162#endif 162#endif
163 163
164/* Atmel chips don't use the same PRI format as Intel chips */ 164/* Atmel chips don't use the same PRI format as Intel chips */
165static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) 165static void fixup_convert_atmel_pri(struct mtd_info *mtd)
166{ 166{
167 struct map_info *map = mtd->priv; 167 struct map_info *map = mtd->priv;
168 struct cfi_private *cfi = map->fldrv_priv; 168 struct cfi_private *cfi = map->fldrv_priv;
@@ -202,7 +202,7 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
202 cfi->cfiq->BufWriteTimeoutMax = 0; 202 cfi->cfiq->BufWriteTimeoutMax = 0;
203} 203}
204 204
205static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param) 205static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
206{ 206{
207 struct map_info *map = mtd->priv; 207 struct map_info *map = mtd->priv;
208 struct cfi_private *cfi = map->fldrv_priv; 208 struct cfi_private *cfi = map->fldrv_priv;
@@ -214,7 +214,7 @@ static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
214 214
215#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 215#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
216/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 216/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
217static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) 217static void fixup_intel_strataflash(struct mtd_info *mtd)
218{ 218{
219 struct map_info *map = mtd->priv; 219 struct map_info *map = mtd->priv;
220 struct cfi_private *cfi = map->fldrv_priv; 220 struct cfi_private *cfi = map->fldrv_priv;
@@ -227,7 +227,7 @@ static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
227#endif 227#endif
228 228
229#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND 229#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
230static void fixup_no_write_suspend(struct mtd_info *mtd, void* param) 230static void fixup_no_write_suspend(struct mtd_info *mtd)
231{ 231{
232 struct map_info *map = mtd->priv; 232 struct map_info *map = mtd->priv;
233 struct cfi_private *cfi = map->fldrv_priv; 233 struct cfi_private *cfi = map->fldrv_priv;
@@ -240,7 +240,7 @@ static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
240} 240}
241#endif 241#endif
242 242
243static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param) 243static void fixup_st_m28w320ct(struct mtd_info *mtd)
244{ 244{
245 struct map_info *map = mtd->priv; 245 struct map_info *map = mtd->priv;
246 struct cfi_private *cfi = map->fldrv_priv; 246 struct cfi_private *cfi = map->fldrv_priv;
@@ -249,7 +249,7 @@ static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
249 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */ 249 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
250} 250}
251 251
252static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param) 252static void fixup_st_m28w320cb(struct mtd_info *mtd)
253{ 253{
254 struct map_info *map = mtd->priv; 254 struct map_info *map = mtd->priv;
255 struct cfi_private *cfi = map->fldrv_priv; 255 struct cfi_private *cfi = map->fldrv_priv;
@@ -259,7 +259,7 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
259 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; 259 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
260}; 260};
261 261
262static void fixup_use_point(struct mtd_info *mtd, void *param) 262static void fixup_use_point(struct mtd_info *mtd)
263{ 263{
264 struct map_info *map = mtd->priv; 264 struct map_info *map = mtd->priv;
265 if (!mtd->point && map_is_linear(map)) { 265 if (!mtd->point && map_is_linear(map)) {
@@ -268,7 +268,7 @@ static void fixup_use_point(struct mtd_info *mtd, void *param)
268 } 268 }
269} 269}
270 270
271static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) 271static void fixup_use_write_buffers(struct mtd_info *mtd)
272{ 272{
273 struct map_info *map = mtd->priv; 273 struct map_info *map = mtd->priv;
274 struct cfi_private *cfi = map->fldrv_priv; 274 struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
282/* 282/*
283 * Some chips power-up with all sectors locked by default. 283 * Some chips power-up with all sectors locked by default.
284 */ 284 */
285static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param) 285static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
286{ 286{
287 struct map_info *map = mtd->priv; 287 struct map_info *map = mtd->priv;
288 struct cfi_private *cfi = map->fldrv_priv; 288 struct cfi_private *cfi = map->fldrv_priv;
@@ -295,31 +295,31 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
295} 295}
296 296
297static struct cfi_fixup cfi_fixup_table[] = { 297static struct cfi_fixup cfi_fixup_table[] = {
298 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 298 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
299 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL }, 299 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
300 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL }, 300 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
301#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 301#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
302 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 302 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
303#endif 303#endif
304#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND 304#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
305 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL }, 305 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
306#endif 306#endif
307#if !FORCE_WORD_WRITE 307#if !FORCE_WORD_WRITE
308 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL }, 308 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
309#endif 309#endif
310 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, 310 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
311 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, 311 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
312 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, 312 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
313 { 0, 0, NULL, NULL } 313 { 0, 0, NULL }
314}; 314};
315 315
316static struct cfi_fixup jedec_fixup_table[] = { 316static struct cfi_fixup jedec_fixup_table[] = {
317 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, 317 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
318 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, 318 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
319 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, 319 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
320 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, }, 320 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
321 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, }, 321 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
322 { 0, 0, NULL, NULL } 322 { 0, 0, NULL }
323}; 323};
324static struct cfi_fixup fixup_table[] = { 324static struct cfi_fixup fixup_table[] = {
325 /* The CFI vendor ids and the JEDEC vendor IDs appear 325 /* The CFI vendor ids and the JEDEC vendor IDs appear
@@ -327,8 +327,8 @@ static struct cfi_fixup fixup_table[] = {
327 * well. This table is to pick all cases where 327 * well. This table is to pick all cases where
328 * we know that is the case. 328 * we know that is the case.
329 */ 329 */
330 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL }, 330 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
331 { 0, 0, NULL, NULL } 331 { 0, 0, NULL }
332}; 332};
333 333
334static void cfi_fixup_major_minor(struct cfi_private *cfi, 334static void cfi_fixup_major_minor(struct cfi_private *cfi,
@@ -455,6 +455,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
455 mtd->flags = MTD_CAP_NORFLASH; 455 mtd->flags = MTD_CAP_NORFLASH;
456 mtd->name = map->name; 456 mtd->name = map->name;
457 mtd->writesize = 1; 457 mtd->writesize = 1;
458 mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
458 459
459 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; 460 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
460 461
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 3b8e32d87977..f072fcfde04e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -134,7 +134,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp)
134 134
135#ifdef AMD_BOOTLOC_BUG 135#ifdef AMD_BOOTLOC_BUG
136/* Wheee. Bring me the head of someone at AMD. */ 136/* Wheee. Bring me the head of someone at AMD. */
137static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) 137static void fixup_amd_bootblock(struct mtd_info *mtd)
138{ 138{
139 struct map_info *map = mtd->priv; 139 struct map_info *map = mtd->priv;
140 struct cfi_private *cfi = map->fldrv_priv; 140 struct cfi_private *cfi = map->fldrv_priv;
@@ -186,7 +186,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
186} 186}
187#endif 187#endif
188 188
189static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) 189static void fixup_use_write_buffers(struct mtd_info *mtd)
190{ 190{
191 struct map_info *map = mtd->priv; 191 struct map_info *map = mtd->priv;
192 struct cfi_private *cfi = map->fldrv_priv; 192 struct cfi_private *cfi = map->fldrv_priv;
@@ -197,7 +197,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
197} 197}
198 198
199/* Atmel chips don't use the same PRI format as AMD chips */ 199/* Atmel chips don't use the same PRI format as AMD chips */
200static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) 200static void fixup_convert_atmel_pri(struct mtd_info *mtd)
201{ 201{
202 struct map_info *map = mtd->priv; 202 struct map_info *map = mtd->priv;
203 struct cfi_private *cfi = map->fldrv_priv; 203 struct cfi_private *cfi = map->fldrv_priv;
@@ -228,14 +228,14 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
228 cfi->cfiq->BufWriteTimeoutMax = 0; 228 cfi->cfiq->BufWriteTimeoutMax = 0;
229} 229}
230 230
231static void fixup_use_secsi(struct mtd_info *mtd, void *param) 231static void fixup_use_secsi(struct mtd_info *mtd)
232{ 232{
233 /* Setup for chips with a secsi area */ 233 /* Setup for chips with a secsi area */
234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
236} 236}
237 237
238static void fixup_use_erase_chip(struct mtd_info *mtd, void *param) 238static void fixup_use_erase_chip(struct mtd_info *mtd)
239{ 239{
240 struct map_info *map = mtd->priv; 240 struct map_info *map = mtd->priv;
241 struct cfi_private *cfi = map->fldrv_priv; 241 struct cfi_private *cfi = map->fldrv_priv;
@@ -250,7 +250,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
251 * locked by default. 251 * locked by default.
252 */ 252 */
253static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param) 253static void fixup_use_atmel_lock(struct mtd_info *mtd)
254{ 254{
255 mtd->lock = cfi_atmel_lock; 255 mtd->lock = cfi_atmel_lock;
256 mtd->unlock = cfi_atmel_unlock; 256 mtd->unlock = cfi_atmel_unlock;
@@ -271,7 +271,7 @@ static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
271 cfi->cfiq->NumEraseRegions = 1; 271 cfi->cfiq->NumEraseRegions = 1;
272} 272}
273 273
274static void fixup_sst39vf(struct mtd_info *mtd, void *param) 274static void fixup_sst39vf(struct mtd_info *mtd)
275{ 275{
276 struct map_info *map = mtd->priv; 276 struct map_info *map = mtd->priv;
277 struct cfi_private *cfi = map->fldrv_priv; 277 struct cfi_private *cfi = map->fldrv_priv;
@@ -282,7 +282,7 @@ static void fixup_sst39vf(struct mtd_info *mtd, void *param)
282 cfi->addr_unlock2 = 0x2AAA; 282 cfi->addr_unlock2 = 0x2AAA;
283} 283}
284 284
285static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param) 285static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
286{ 286{
287 struct map_info *map = mtd->priv; 287 struct map_info *map = mtd->priv;
288 struct cfi_private *cfi = map->fldrv_priv; 288 struct cfi_private *cfi = map->fldrv_priv;
@@ -295,12 +295,12 @@ static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
295 cfi->sector_erase_cmd = CMD(0x50); 295 cfi->sector_erase_cmd = CMD(0x50);
296} 296}
297 297
298static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param) 298static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
299{ 299{
300 struct map_info *map = mtd->priv; 300 struct map_info *map = mtd->priv;
301 struct cfi_private *cfi = map->fldrv_priv; 301 struct cfi_private *cfi = map->fldrv_priv;
302 302
303 fixup_sst39vf_rev_b(mtd, param); 303 fixup_sst39vf_rev_b(mtd);
304 304
305 /* 305 /*
306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
@@ -310,7 +310,7 @@ static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd, void *param)
310 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); 310 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
311} 311}
312 312
313static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param) 313static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
314{ 314{
315 struct map_info *map = mtd->priv; 315 struct map_info *map = mtd->priv;
316 struct cfi_private *cfi = map->fldrv_priv; 316 struct cfi_private *cfi = map->fldrv_priv;
@@ -321,7 +321,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
321 } 321 }
322} 322}
323 323
324static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param) 324static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
325{ 325{
326 struct map_info *map = mtd->priv; 326 struct map_info *map = mtd->priv;
327 struct cfi_private *cfi = map->fldrv_priv; 327 struct cfi_private *cfi = map->fldrv_priv;
@@ -334,47 +334,47 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
334 334
335/* Used to fix CFI-Tables of chips without Extended Query Tables */ 335/* Used to fix CFI-Tables of chips without Extended Query Tables */
336static struct cfi_fixup cfi_nopri_fixup_table[] = { 336static struct cfi_fixup cfi_nopri_fixup_table[] = {
337 { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, /* SST39VF1602 */ 337 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
338 { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, /* SST39VF1601 */ 338 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
339 { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, /* SST39VF3202 */ 339 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
340 { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, /* SST39VF3201 */ 340 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
341 { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3202B */ 341 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
342 { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF3201B */ 342 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
343 { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6402B */ 343 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
344 { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, /* SST39VF6401B */ 344 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
345 { 0, 0, NULL, NULL } 345 { 0, 0, NULL }
346}; 346};
347 347
348static struct cfi_fixup cfi_fixup_table[] = { 348static struct cfi_fixup cfi_fixup_table[] = {
349 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 349 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
350#ifdef AMD_BOOTLOC_BUG 350#ifdef AMD_BOOTLOC_BUG
351 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 351 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
352 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 352 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
353#endif 353#endif
354 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, 354 { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
355 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, 355 { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
356 { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, }, 356 { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
357 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, }, 357 { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
358 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, }, 358 { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
359 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, }, 359 { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
360 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, }, 360 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
361 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, }, 361 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
362 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, }, 362 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
363 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, }, 363 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
364 { CFI_MFR_SST, 0x536A, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6402 */ 364 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
365 { CFI_MFR_SST, 0x536B, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6401 */ 365 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
366 { CFI_MFR_SST, 0x536C, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6404 */ 366 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
367 { CFI_MFR_SST, 0x536D, fixup_sst38vf640x_sectorsize, NULL, }, /* SST38VF6403 */ 367 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
368#if !FORCE_WORD_WRITE 368#if !FORCE_WORD_WRITE
369 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, 369 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
370#endif 370#endif
371 { 0, 0, NULL, NULL } 371 { 0, 0, NULL }
372}; 372};
373static struct cfi_fixup jedec_fixup_table[] = { 373static struct cfi_fixup jedec_fixup_table[] = {
374 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, 374 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
375 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, }, 375 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
376 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, }, 376 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
377 { 0, 0, NULL, NULL } 377 { 0, 0, NULL }
378}; 378};
379 379
380static struct cfi_fixup fixup_table[] = { 380static struct cfi_fixup fixup_table[] = {
@@ -383,18 +383,30 @@ static struct cfi_fixup fixup_table[] = {
383 * well. This table is to pick all cases where 383 * well. This table is to pick all cases where
384 * we know that is the case. 384 * we know that is the case.
385 */ 385 */
386 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL }, 386 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
387 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL }, 387 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
388 { 0, 0, NULL, NULL } 388 { 0, 0, NULL }
389}; 389};
390 390
391 391
392static void cfi_fixup_major_minor(struct cfi_private *cfi, 392static void cfi_fixup_major_minor(struct cfi_private *cfi,
393 struct cfi_pri_amdstd *extp) 393 struct cfi_pri_amdstd *extp)
394{ 394{
395 if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e && 395 if (cfi->mfr == CFI_MFR_SAMSUNG) {
396 extp->MajorVersion == '0') 396 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
397 extp->MajorVersion = '1'; 397 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
398 /*
399 * Samsung K8P2815UQB and K8D6x16UxM chips
400 * report major=0 / minor=0.
401 * K8D3x16UxC chips report major=3 / minor=3.
402 */
403 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
404 " Extended Query version to 1.%c\n",
405 extp->MinorVersion);
406 extp->MajorVersion = '1';
407 }
408 }
409
398 /* 410 /*
399 * SST 38VF640x chips report major=0xFF / minor=0xFF. 411 * SST 38VF640x chips report major=0xFF / minor=0xFF.
400 */ 412 */
@@ -428,6 +440,10 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
428 mtd->flags = MTD_CAP_NORFLASH; 440 mtd->flags = MTD_CAP_NORFLASH;
429 mtd->name = map->name; 441 mtd->name = map->name;
430 mtd->writesize = 1; 442 mtd->writesize = 1;
443 mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
444
445 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
446 __func__, mtd->writebufsize);
431 447
432 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 448 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
433 449
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 314af1f5a370..c04b7658abe9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -238,6 +238,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
238 mtd->resume = cfi_staa_resume; 238 mtd->resume = cfi_staa_resume;
239 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; 239 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
240 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ 240 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
241 mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize;
241 map->fldrv = &cfi_staa_chipdrv; 242 map->fldrv = &cfi_staa_chipdrv;
242 __module_get(THIS_MODULE); 243 __module_get(THIS_MODULE);
243 mtd->name = map->name; 244 mtd->name = map->name;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 360525c637d2..6ae3d111e1e7 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -156,7 +156,7 @@ void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
156 for (f=fixups; f->fixup; f++) { 156 for (f=fixups; f->fixup; f++) {
157 if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) && 157 if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
158 ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) { 158 ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
159 f->fixup(mtd, f->param); 159 f->fixup(mtd);
160 } 160 }
161 } 161 }
162} 162}
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index d18064977192..5e3cc80128aa 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -98,7 +98,7 @@ static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
98 return ret; 98 return ret;
99} 99}
100 100
101static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param) 101static void fixup_use_fwh_lock(struct mtd_info *mtd)
102{ 102{
103 printk(KERN_NOTICE "using fwh lock/unlock method\n"); 103 printk(KERN_NOTICE "using fwh lock/unlock method\n");
104 /* Setup for the chips with the fwh lock method */ 104 /* Setup for the chips with the fwh lock method */
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index bf5a002209bd..e4eba6cc1b2e 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -51,6 +51,10 @@
51#define OPCODE_WRDI 0x04 /* Write disable */ 51#define OPCODE_WRDI 0x04 /* Write disable */
52#define OPCODE_AAI_WP 0xad /* Auto address increment word program */ 52#define OPCODE_AAI_WP 0xad /* Auto address increment word program */
53 53
54/* Used for Macronix flashes only. */
55#define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */
56#define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */
57
54/* Status Register bits. */ 58/* Status Register bits. */
55#define SR_WIP 1 /* Write in progress */ 59#define SR_WIP 1 /* Write in progress */
56#define SR_WEL 2 /* Write enable latch */ 60#define SR_WEL 2 /* Write enable latch */
@@ -62,7 +66,7 @@
62 66
63/* Define max times to check status register before we give up. */ 67/* Define max times to check status register before we give up. */
64#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ 68#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
65#define MAX_CMD_SIZE 4 69#define MAX_CMD_SIZE 5
66 70
67#ifdef CONFIG_M25PXX_USE_FAST_READ 71#ifdef CONFIG_M25PXX_USE_FAST_READ
68#define OPCODE_READ OPCODE_FAST_READ 72#define OPCODE_READ OPCODE_FAST_READ
@@ -152,6 +156,16 @@ static inline int write_disable(struct m25p *flash)
152} 156}
153 157
154/* 158/*
159 * Enable/disable 4-byte addressing mode.
160 */
161static inline int set_4byte(struct m25p *flash, int enable)
162{
163 u8 code = enable ? OPCODE_EN4B : OPCODE_EX4B;
164
165 return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
166}
167
168/*
155 * Service routine to read status register until ready, or timeout occurs. 169 * Service routine to read status register until ready, or timeout occurs.
156 * Returns non-zero if error. 170 * Returns non-zero if error.
157 */ 171 */
@@ -207,6 +221,7 @@ static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
207 cmd[1] = addr >> (flash->addr_width * 8 - 8); 221 cmd[1] = addr >> (flash->addr_width * 8 - 8);
208 cmd[2] = addr >> (flash->addr_width * 8 - 16); 222 cmd[2] = addr >> (flash->addr_width * 8 - 16);
209 cmd[3] = addr >> (flash->addr_width * 8 - 24); 223 cmd[3] = addr >> (flash->addr_width * 8 - 24);
224 cmd[4] = addr >> (flash->addr_width * 8 - 32);
210} 225}
211 226
212static int m25p_cmdsz(struct m25p *flash) 227static int m25p_cmdsz(struct m25p *flash)
@@ -482,6 +497,10 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
482 size_t actual; 497 size_t actual;
483 int cmd_sz, ret; 498 int cmd_sz, ret;
484 499
500 DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
501 dev_name(&flash->spi->dev), __func__, "to",
502 (u32)to, len);
503
485 *retlen = 0; 504 *retlen = 0;
486 505
487 /* sanity checks */ 506 /* sanity checks */
@@ -607,7 +626,6 @@ struct flash_info {
607 .sector_size = (_sector_size), \ 626 .sector_size = (_sector_size), \
608 .n_sectors = (_n_sectors), \ 627 .n_sectors = (_n_sectors), \
609 .page_size = 256, \ 628 .page_size = 256, \
610 .addr_width = 3, \
611 .flags = (_flags), \ 629 .flags = (_flags), \
612 }) 630 })
613 631
@@ -635,7 +653,7 @@ static const struct spi_device_id m25p_ids[] = {
635 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, 653 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
636 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, 654 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
637 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, 655 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
638 { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, 656 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
639 657
640 /* EON -- en25pxx */ 658 /* EON -- en25pxx */
641 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, 659 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
@@ -653,6 +671,8 @@ static const struct spi_device_id m25p_ids[] = {
653 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, 671 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
654 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, 672 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
655 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, 673 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
674 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
675 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
656 676
657 /* Spansion -- single (large) sector size only, at least 677 /* Spansion -- single (large) sector size only, at least
658 * for the chips listed here (without boot sectors). 678 * for the chips listed here (without boot sectors).
@@ -764,6 +784,7 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
764 return &m25p_ids[tmp]; 784 return &m25p_ids[tmp];
765 } 785 }
766 } 786 }
787 dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
767 return ERR_PTR(-ENODEV); 788 return ERR_PTR(-ENODEV);
768} 789}
769 790
@@ -883,7 +904,17 @@ static int __devinit m25p_probe(struct spi_device *spi)
883 904
884 flash->mtd.dev.parent = &spi->dev; 905 flash->mtd.dev.parent = &spi->dev;
885 flash->page_size = info->page_size; 906 flash->page_size = info->page_size;
886 flash->addr_width = info->addr_width; 907
908 if (info->addr_width)
909 flash->addr_width = info->addr_width;
910 else {
911 /* enable 4-byte addressing if the device exceeds 16MiB */
912 if (flash->mtd.size > 0x1000000) {
913 flash->addr_width = 4;
914 set_4byte(flash, 1);
915 } else
916 flash->addr_width = 3;
917 }
887 918
888 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, 919 dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
889 (long long)flash->mtd.size >> 10); 920 (long long)flash->mtd.size >> 10);
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 684247a8a5ed..c163e619abc9 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -335,7 +335,7 @@ out:
335 return ret; 335 return ret;
336} 336}
337 337
338static struct flash_info *__init sst25l_match_device(struct spi_device *spi) 338static struct flash_info *__devinit sst25l_match_device(struct spi_device *spi)
339{ 339{
340 struct flash_info *flash_info = NULL; 340 struct flash_info *flash_info = NULL;
341 struct spi_message m; 341 struct spi_message m;
@@ -375,7 +375,7 @@ static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
375 return flash_info; 375 return flash_info;
376} 376}
377 377
378static int __init sst25l_probe(struct spi_device *spi) 378static int __devinit sst25l_probe(struct spi_device *spi)
379{ 379{
380 struct flash_info *flash_info; 380 struct flash_info *flash_info;
381 struct sst25l_flash *flash; 381 struct sst25l_flash *flash;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 19fe92db0c46..77d64ce19e9f 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -149,11 +149,8 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
149 if (request_resource(&iomem_resource, &window->rsrc)) { 149 if (request_resource(&iomem_resource, &window->rsrc)) {
150 window->rsrc.parent = NULL; 150 window->rsrc.parent = NULL;
151 printk(KERN_ERR MOD_NAME 151 printk(KERN_ERR MOD_NAME
152 " %s(): Unable to register resource" 152 " %s(): Unable to register resource %pR - kernel bug?\n",
153 " 0x%.16llx-0x%.16llx - kernel bug?\n", 153 __func__, &window->rsrc);
154 __func__,
155 (unsigned long long)window->rsrc.start,
156 (unsigned long long)window->rsrc.end);
157 } 154 }
158 155
159 156
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index d175c120ee84..1f3049590d9e 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -196,10 +196,15 @@ static int bcm963xx_probe(struct platform_device *pdev)
196 bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map); 196 bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
197 if (!bcm963xx_mtd_info) { 197 if (!bcm963xx_mtd_info) {
198 dev_err(&pdev->dev, "failed to probe using CFI\n"); 198 dev_err(&pdev->dev, "failed to probe using CFI\n");
199 bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
200 if (bcm963xx_mtd_info)
201 goto probe_ok;
202 dev_err(&pdev->dev, "failed to probe using JEDEC\n");
199 err = -EIO; 203 err = -EIO;
200 goto err_probe; 204 goto err_probe;
201 } 205 }
202 206
207probe_ok:
203 bcm963xx_mtd_info->owner = THIS_MODULE; 208 bcm963xx_mtd_info->owner = THIS_MODULE;
204 209
205 /* This is mutually exclusive */ 210 /* This is mutually exclusive */
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index ddb462bea9b5..5fdb7b26cea3 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -178,11 +178,8 @@ static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
178 if (request_resource(&iomem_resource, &window->rsrc)) { 178 if (request_resource(&iomem_resource, &window->rsrc)) {
179 window->rsrc.parent = NULL; 179 window->rsrc.parent = NULL;
180 printk(KERN_ERR MOD_NAME 180 printk(KERN_ERR MOD_NAME
181 " %s(): Unable to register resource" 181 " %s(): Unable to register resource %pR - kernel bug?\n",
182 " 0x%.016llx-0x%.016llx - kernel bug?\n", 182 __func__, &window->rsrc);
183 __func__,
184 (unsigned long long)window->rsrc.start,
185 (unsigned long long)window->rsrc.end);
186 } 183 }
187 184
188 185
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index d12c93dc1aad..4feb7507ab7c 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -242,12 +242,9 @@ static int __devinit esb2rom_init_one(struct pci_dev *pdev,
242 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; 242 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
243 if (request_resource(&iomem_resource, &window->rsrc)) { 243 if (request_resource(&iomem_resource, &window->rsrc)) {
244 window->rsrc.parent = NULL; 244 window->rsrc.parent = NULL;
245 printk(KERN_DEBUG MOD_NAME 245 printk(KERN_DEBUG MOD_NAME ": "
246 ": %s(): Unable to register resource" 246 "%s(): Unable to register resource %pR - kernel bug?\n",
247 " 0x%.08llx-0x%.08llx - kernel bug?\n", 247 __func__, &window->rsrc);
248 __func__,
249 (unsigned long long)window->rsrc.start,
250 (unsigned long long)window->rsrc.end);
251 } 248 }
252 249
253 /* Map the firmware hub into my address space. */ 250 /* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index f102bf243a74..1337a4191a0c 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -175,12 +175,9 @@ static int __devinit ichxrom_init_one (struct pci_dev *pdev,
175 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; 175 window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
176 if (request_resource(&iomem_resource, &window->rsrc)) { 176 if (request_resource(&iomem_resource, &window->rsrc)) {
177 window->rsrc.parent = NULL; 177 window->rsrc.parent = NULL;
178 printk(KERN_DEBUG MOD_NAME 178 printk(KERN_DEBUG MOD_NAME ": "
179 ": %s(): Unable to register resource" 179 "%s(): Unable to register resource %pR - kernel bug?\n",
180 " 0x%.16llx-0x%.16llx - kernel bug?\n", 180 __func__, &window->rsrc);
181 __func__,
182 (unsigned long long)window->rsrc.start,
183 (unsigned long long)window->rsrc.end);
184 } 181 }
185 182
186 /* Map the firmware hub into my address space. */ 183 /* Map the firmware hub into my address space. */
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 9861814aa027..8506578e6a35 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -274,9 +274,7 @@ static int __devinit of_flash_probe(struct platform_device *dev,
274 continue; 274 continue;
275 } 275 }
276 276
277 dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n", 277 dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);
278 (unsigned long long)res.start,
279 (unsigned long long)res.end);
280 278
281 err = -EBUSY; 279 err = -EBUSY;
282 res_size = resource_size(&res); 280 res_size = resource_size(&res);
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index b5391ebb736e..027e628a4f1d 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -166,9 +166,8 @@ static int __init init_scx200_docflash(void)
166 outl(pmr, scx200_cb_base + SCx200_PMR); 166 outl(pmr, scx200_cb_base + SCx200_PMR);
167 } 167 }
168 168
169 printk(KERN_INFO NAME ": DOCCS mapped at 0x%llx-0x%llx, width %d\n", 169 printk(KERN_INFO NAME ": DOCCS mapped at %pR, width %d\n",
170 (unsigned long long)docmem.start, 170 &docmem, width);
171 (unsigned long long)docmem.end, width);
172 171
173 scx200_docflash_map.size = size; 172 scx200_docflash_map.size = size;
174 if (width == 8) 173 if (width == 8)
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 60146984f4be..c08e140d40ed 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -139,7 +139,7 @@ static int __init init_tqm_mtd(void)
139 goto error_mem; 139 goto error_mem;
140 } 140 }
141 141
142 map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL); 142 map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
143 143
144 if (!map_banks[idx]->name) { 144 if (!map_banks[idx]->name) {
145 ret = -ENOMEM; 145 ret = -ENOMEM;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index ee4bb3330bdf..145b3d0dc0db 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -522,10 +522,6 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
522 if (!capable(CAP_SYS_ADMIN)) 522 if (!capable(CAP_SYS_ADMIN))
523 return -EPERM; 523 return -EPERM;
524 524
525 /* Only master mtd device must be used to control partitions */
526 if (!mtd_is_master(mtd))
527 return -EINVAL;
528
529 if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg))) 525 if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
530 return -EFAULT; 526 return -EFAULT;
531 527
@@ -535,6 +531,10 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
535 switch (a.op) { 531 switch (a.op) {
536 case BLKPG_ADD_PARTITION: 532 case BLKPG_ADD_PARTITION:
537 533
534 /* Only master mtd device must be used to add partitions */
535 if (mtd_is_partition(mtd))
536 return -EINVAL;
537
538 return mtd_add_partition(mtd, p.devname, p.start, p.length); 538 return mtd_add_partition(mtd, p.devname, p.start, p.length);
539 539
540 case BLKPG_DEL_PARTITION: 540 case BLKPG_DEL_PARTITION:
@@ -601,6 +601,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
601 } 601 }
602 602
603 case MEMGETINFO: 603 case MEMGETINFO:
604 memset(&info, 0, sizeof(info));
604 info.type = mtd->type; 605 info.type = mtd->type;
605 info.flags = mtd->flags; 606 info.flags = mtd->flags;
606 info.size = mtd->size; 607 info.size = mtd->size;
@@ -609,7 +610,6 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
609 info.oobsize = mtd->oobsize; 610 info.oobsize = mtd->oobsize;
610 /* The below fields are obsolete */ 611 /* The below fields are obsolete */
611 info.ecctype = -1; 612 info.ecctype = -1;
612 info.eccsize = 0;
613 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) 613 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
614 return -EFAULT; 614 return -EFAULT;
615 break; 615 break;
@@ -1201,7 +1201,7 @@ err_unregister_chdev:
1201static void __exit cleanup_mtdchar(void) 1201static void __exit cleanup_mtdchar(void)
1202{ 1202{
1203 unregister_mtd_user(&mtdchar_notifier); 1203 unregister_mtd_user(&mtdchar_notifier);
1204 mntput_long(mtd_inode_mnt); 1204 mntput(mtd_inode_mnt);
1205 unregister_filesystem(&mtd_inodefs_type); 1205 unregister_filesystem(&mtd_inodefs_type);
1206 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1206 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1207} 1207}
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index bf8de0943103..5f5777bd3f75 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -776,6 +776,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
776 concat->mtd.size = subdev[0]->size; 776 concat->mtd.size = subdev[0]->size;
777 concat->mtd.erasesize = subdev[0]->erasesize; 777 concat->mtd.erasesize = subdev[0]->erasesize;
778 concat->mtd.writesize = subdev[0]->writesize; 778 concat->mtd.writesize = subdev[0]->writesize;
779 concat->mtd.writebufsize = subdev[0]->writebufsize;
779 concat->mtd.subpage_sft = subdev[0]->subpage_sft; 780 concat->mtd.subpage_sft = subdev[0]->subpage_sft;
780 concat->mtd.oobsize = subdev[0]->oobsize; 781 concat->mtd.oobsize = subdev[0]->oobsize;
781 concat->mtd.oobavail = subdev[0]->oobavail; 782 concat->mtd.oobavail = subdev[0]->oobavail;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index c948150079be..e3e40f440323 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -401,7 +401,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
401 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n"); 401 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
402 402
403 cxt->mtd = NULL; 403 cxt->mtd = NULL;
404 flush_scheduled_work(); 404 flush_work_sync(&cxt->work_erase);
405 flush_work_sync(&cxt->work_write);
405} 406}
406 407
407 408
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 79e3689f1e16..0a4760174782 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -120,8 +120,25 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
120 return -EINVAL; 120 return -EINVAL;
121 if (ops->datbuf && from + ops->len > mtd->size) 121 if (ops->datbuf && from + ops->len > mtd->size)
122 return -EINVAL; 122 return -EINVAL;
123 res = part->master->read_oob(part->master, from + part->offset, ops);
124 123
124 /*
125 * If OOB is also requested, make sure that we do not read past the end
126 * of this partition.
127 */
128 if (ops->oobbuf) {
129 size_t len, pages;
130
131 if (ops->mode == MTD_OOB_AUTO)
132 len = mtd->oobavail;
133 else
134 len = mtd->oobsize;
135 pages = mtd_div_by_ws(mtd->size, mtd);
136 pages -= mtd_div_by_ws(from, mtd);
137 if (ops->ooboffs + ops->ooblen > pages * len)
138 return -EINVAL;
139 }
140
141 res = part->master->read_oob(part->master, from + part->offset, ops);
125 if (unlikely(res)) { 142 if (unlikely(res)) {
126 if (res == -EUCLEAN) 143 if (res == -EUCLEAN)
127 mtd->ecc_stats.corrected++; 144 mtd->ecc_stats.corrected++;
@@ -384,6 +401,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
384 slave->mtd.flags = master->flags & ~part->mask_flags; 401 slave->mtd.flags = master->flags & ~part->mask_flags;
385 slave->mtd.size = part->size; 402 slave->mtd.size = part->size;
386 slave->mtd.writesize = master->writesize; 403 slave->mtd.writesize = master->writesize;
404 slave->mtd.writebufsize = master->writebufsize;
387 slave->mtd.oobsize = master->oobsize; 405 slave->mtd.oobsize = master->oobsize;
388 slave->mtd.oobavail = master->oobavail; 406 slave->mtd.oobavail = master->oobavail;
389 slave->mtd.subpage_sft = master->subpage_sft; 407 slave->mtd.subpage_sft = master->subpage_sft;
@@ -720,19 +738,19 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
720} 738}
721EXPORT_SYMBOL_GPL(parse_mtd_partitions); 739EXPORT_SYMBOL_GPL(parse_mtd_partitions);
722 740
723int mtd_is_master(struct mtd_info *mtd) 741int mtd_is_partition(struct mtd_info *mtd)
724{ 742{
725 struct mtd_part *part; 743 struct mtd_part *part;
726 int nopart = 0; 744 int ispart = 0;
727 745
728 mutex_lock(&mtd_partitions_mutex); 746 mutex_lock(&mtd_partitions_mutex);
729 list_for_each_entry(part, &mtd_partitions, list) 747 list_for_each_entry(part, &mtd_partitions, list)
730 if (&part->mtd == mtd) { 748 if (&part->mtd == mtd) {
731 nopart = 1; 749 ispart = 1;
732 break; 750 break;
733 } 751 }
734 mutex_unlock(&mtd_partitions_mutex); 752 mutex_unlock(&mtd_partitions_mutex);
735 753
736 return nopart; 754 return ispart;
737} 755}
738EXPORT_SYMBOL_GPL(mtd_is_master); 756EXPORT_SYMBOL_GPL(mtd_is_partition);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8229802b4346..c89592239bc7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -96,6 +96,7 @@ config MTD_NAND_SPIA
96config MTD_NAND_AMS_DELTA 96config MTD_NAND_AMS_DELTA
97 tristate "NAND Flash device on Amstrad E3" 97 tristate "NAND Flash device on Amstrad E3"
98 depends on MACH_AMS_DELTA 98 depends on MACH_AMS_DELTA
99 default y
99 help 100 help
100 Support for NAND flash on Amstrad E3 (Delta). 101 Support for NAND flash on Amstrad E3 (Delta).
101 102
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 2548e1065bf8..a067d090cb31 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -4,6 +4,8 @@
4 * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li> 4 * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
5 * 5 *
6 * Derived from drivers/mtd/toto.c 6 * Derived from drivers/mtd/toto.c
7 * Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
8 * Partially stolen from drivers/mtd/nand/plat_nand.c
7 * 9 *
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -62,9 +64,10 @@ static struct mtd_partition partition_info[] = {
62static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte) 64static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
63{ 65{
64 struct nand_chip *this = mtd->priv; 66 struct nand_chip *this = mtd->priv;
67 void __iomem *io_base = this->priv;
65 68
66 omap_writew(0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL)); 69 writew(0, io_base + OMAP_MPUIO_IO_CNTL);
67 omap_writew(byte, this->IO_ADDR_W); 70 writew(byte, this->IO_ADDR_W);
68 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0); 71 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
69 ndelay(40); 72 ndelay(40);
70 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 73 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
@@ -75,11 +78,12 @@ static u_char ams_delta_read_byte(struct mtd_info *mtd)
75{ 78{
76 u_char res; 79 u_char res;
77 struct nand_chip *this = mtd->priv; 80 struct nand_chip *this = mtd->priv;
81 void __iomem *io_base = this->priv;
78 82
79 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0); 83 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
80 ndelay(40); 84 ndelay(40);
81 omap_writew(~0, (OMAP1_MPUIO_BASE + OMAP_MPUIO_IO_CNTL)); 85 writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
82 res = omap_readw(this->IO_ADDR_R); 86 res = readw(this->IO_ADDR_R);
83 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 87 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
84 AMS_DELTA_LATCH2_NAND_NRE); 88 AMS_DELTA_LATCH2_NAND_NRE);
85 89
@@ -151,11 +155,16 @@ static int ams_delta_nand_ready(struct mtd_info *mtd)
151/* 155/*
152 * Main initialization routine 156 * Main initialization routine
153 */ 157 */
154static int __init ams_delta_init(void) 158static int __devinit ams_delta_init(struct platform_device *pdev)
155{ 159{
156 struct nand_chip *this; 160 struct nand_chip *this;
161 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
162 void __iomem *io_base;
157 int err = 0; 163 int err = 0;
158 164
165 if (!res)
166 return -ENXIO;
167
159 /* Allocate memory for MTD device structure and private data */ 168 /* Allocate memory for MTD device structure and private data */
160 ams_delta_mtd = kmalloc(sizeof(struct mtd_info) + 169 ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
161 sizeof(struct nand_chip), GFP_KERNEL); 170 sizeof(struct nand_chip), GFP_KERNEL);
@@ -177,9 +186,25 @@ static int __init ams_delta_init(void)
177 /* Link the private data with the MTD structure */ 186 /* Link the private data with the MTD structure */
178 ams_delta_mtd->priv = this; 187 ams_delta_mtd->priv = this;
179 188
189 if (!request_mem_region(res->start, resource_size(res),
190 dev_name(&pdev->dev))) {
191 dev_err(&pdev->dev, "request_mem_region failed\n");
192 err = -EBUSY;
193 goto out_free;
194 }
195
196 io_base = ioremap(res->start, resource_size(res));
197 if (io_base == NULL) {
198 dev_err(&pdev->dev, "ioremap failed\n");
199 err = -EIO;
200 goto out_release_io;
201 }
202
203 this->priv = io_base;
204
180 /* Set address of NAND IO lines */ 205 /* Set address of NAND IO lines */
181 this->IO_ADDR_R = (OMAP1_MPUIO_BASE + OMAP_MPUIO_INPUT_LATCH); 206 this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
182 this->IO_ADDR_W = (OMAP1_MPUIO_BASE + OMAP_MPUIO_OUTPUT); 207 this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
183 this->read_byte = ams_delta_read_byte; 208 this->read_byte = ams_delta_read_byte;
184 this->write_buf = ams_delta_write_buf; 209 this->write_buf = ams_delta_write_buf;
185 this->read_buf = ams_delta_read_buf; 210 this->read_buf = ams_delta_read_buf;
@@ -195,6 +220,8 @@ static int __init ams_delta_init(void)
195 this->chip_delay = 30; 220 this->chip_delay = 30;
196 this->ecc.mode = NAND_ECC_SOFT; 221 this->ecc.mode = NAND_ECC_SOFT;
197 222
223 platform_set_drvdata(pdev, io_base);
224
198 /* Set chip enabled, but */ 225 /* Set chip enabled, but */
199 ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE | 226 ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
200 AMS_DELTA_LATCH2_NAND_NWE | 227 AMS_DELTA_LATCH2_NAND_NWE |
@@ -214,25 +241,56 @@ static int __init ams_delta_init(void)
214 goto out; 241 goto out;
215 242
216 out_mtd: 243 out_mtd:
244 platform_set_drvdata(pdev, NULL);
245 iounmap(io_base);
246out_release_io:
247 release_mem_region(res->start, resource_size(res));
248out_free:
217 kfree(ams_delta_mtd); 249 kfree(ams_delta_mtd);
218 out: 250 out:
219 return err; 251 return err;
220} 252}
221 253
222module_init(ams_delta_init);
223
224/* 254/*
225 * Clean up routine 255 * Clean up routine
226 */ 256 */
227static void __exit ams_delta_cleanup(void) 257static int __devexit ams_delta_cleanup(struct platform_device *pdev)
228{ 258{
259 void __iomem *io_base = platform_get_drvdata(pdev);
260 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
261
229 /* Release resources, unregister device */ 262 /* Release resources, unregister device */
230 nand_release(ams_delta_mtd); 263 nand_release(ams_delta_mtd);
231 264
265 iounmap(io_base);
266 release_mem_region(res->start, resource_size(res));
267
232 /* Free the MTD device structure */ 268 /* Free the MTD device structure */
233 kfree(ams_delta_mtd); 269 kfree(ams_delta_mtd);
270
271 return 0;
272}
273
274static struct platform_driver ams_delta_nand_driver = {
275 .probe = ams_delta_init,
276 .remove = __devexit_p(ams_delta_cleanup),
277 .driver = {
278 .name = "ams-delta-nand",
279 .owner = THIS_MODULE,
280 },
281};
282
283static int __init ams_delta_nand_init(void)
284{
285 return platform_driver_register(&ams_delta_nand_driver);
286}
287module_init(ams_delta_nand_init);
288
289static void __exit ams_delta_nand_exit(void)
290{
291 platform_driver_unregister(&ams_delta_nand_driver);
234} 292}
235module_exit(ams_delta_cleanup); 293module_exit(ams_delta_nand_exit);
236 294
237MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
238MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>"); 296MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index c141b07b25d1..7a13d42cbabd 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -388,6 +388,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
388 "page_addr: 0x%x, column: 0x%x.\n", 388 "page_addr: 0x%x, column: 0x%x.\n",
389 page_addr, column); 389 page_addr, column);
390 390
391 elbc_fcm_ctrl->column = column;
392 elbc_fcm_ctrl->oob = 0;
391 elbc_fcm_ctrl->use_mdr = 1; 393 elbc_fcm_ctrl->use_mdr = 1;
392 394
393 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) | 395 fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 02edfba25b0c..205b10b9f9b9 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -31,6 +31,7 @@
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/mtd/fsmc.h> 33#include <linux/mtd/fsmc.h>
34#include <linux/amba/bus.h>
34#include <mtd/mtd-abi.h> 35#include <mtd/mtd-abi.h>
35 36
36static struct nand_ecclayout fsmc_ecc1_layout = { 37static struct nand_ecclayout fsmc_ecc1_layout = {
@@ -119,21 +120,36 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
119 } 120 }
120}; 121};
121 122
122/*
123 * Default partition tables to be used if the partition information not
124 * provided through platform data
125 */
126#define PARTITION(n, off, sz) {.name = n, .offset = off, .size = sz}
127 123
124#ifdef CONFIG_MTD_PARTITIONS
128/* 125/*
126 * Default partition tables to be used if the partition information not
127 * provided through platform data.
128 *
129 * Default partition layout for small page(= 512 bytes) devices 129 * Default partition layout for small page(= 512 bytes) devices
130 * Size for "Root file system" is updated in driver based on actual device size 130 * Size for "Root file system" is updated in driver based on actual device size
131 */ 131 */
132static struct mtd_partition partition_info_16KB_blk[] = { 132static struct mtd_partition partition_info_16KB_blk[] = {
133 PARTITION("X-loader", 0, 4 * 0x4000), 133 {
134 PARTITION("U-Boot", 0x10000, 20 * 0x4000), 134 .name = "X-loader",
135 PARTITION("Kernel", 0x60000, 256 * 0x4000), 135 .offset = 0,
136 PARTITION("Root File System", 0x460000, 0), 136 .size = 4*0x4000,
137 },
138 {
139 .name = "U-Boot",
140 .offset = 0x10000,
141 .size = 20*0x4000,
142 },
143 {
144 .name = "Kernel",
145 .offset = 0x60000,
146 .size = 256*0x4000,
147 },
148 {
149 .name = "Root File System",
150 .offset = 0x460000,
151 .size = 0,
152 },
137}; 153};
138 154
139/* 155/*
@@ -141,19 +157,37 @@ static struct mtd_partition partition_info_16KB_blk[] = {
141 * Size for "Root file system" is updated in driver based on actual device size 157 * Size for "Root file system" is updated in driver based on actual device size
142 */ 158 */
143static struct mtd_partition partition_info_128KB_blk[] = { 159static struct mtd_partition partition_info_128KB_blk[] = {
144 PARTITION("X-loader", 0, 4 * 0x20000), 160 {
145 PARTITION("U-Boot", 0x80000, 12 * 0x20000), 161 .name = "X-loader",
146 PARTITION("Kernel", 0x200000, 48 * 0x20000), 162 .offset = 0,
147 PARTITION("Root File System", 0x800000, 0), 163 .size = 4*0x20000,
164 },
165 {
166 .name = "U-Boot",
167 .offset = 0x80000,
168 .size = 12*0x20000,
169 },
170 {
171 .name = "Kernel",
172 .offset = 0x200000,
173 .size = 48*0x20000,
174 },
175 {
176 .name = "Root File System",
177 .offset = 0x800000,
178 .size = 0,
179 },
148}; 180};
149 181
150#ifdef CONFIG_MTD_CMDLINE_PARTS 182#ifdef CONFIG_MTD_CMDLINE_PARTS
151const char *part_probes[] = { "cmdlinepart", NULL }; 183const char *part_probes[] = { "cmdlinepart", NULL };
152#endif 184#endif
185#endif
153 186
154/** 187/**
155 * struct fsmc_nand_data - atructure for FSMC NAND device state 188 * struct fsmc_nand_data - structure for FSMC NAND device state
156 * 189 *
190 * @pid: Part ID on the AMBA PrimeCell format
157 * @mtd: MTD info for a NAND flash. 191 * @mtd: MTD info for a NAND flash.
158 * @nand: Chip related info for a NAND flash. 192 * @nand: Chip related info for a NAND flash.
159 * @partitions: Partition info for a NAND Flash. 193 * @partitions: Partition info for a NAND Flash.
@@ -169,6 +203,7 @@ const char *part_probes[] = { "cmdlinepart", NULL };
169 * @regs_va: FSMC regs base address. 203 * @regs_va: FSMC regs base address.
170 */ 204 */
171struct fsmc_nand_data { 205struct fsmc_nand_data {
206 u32 pid;
172 struct mtd_info mtd; 207 struct mtd_info mtd;
173 struct nand_chip nand; 208 struct nand_chip nand;
174 struct mtd_partition *partitions; 209 struct mtd_partition *partitions;
@@ -508,7 +543,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
508 struct nand_chip *nand; 543 struct nand_chip *nand;
509 struct fsmc_regs *regs; 544 struct fsmc_regs *regs;
510 struct resource *res; 545 struct resource *res;
511 int nr_parts, ret = 0; 546 int ret = 0;
547 u32 pid;
548 int i;
512 549
513 if (!pdata) { 550 if (!pdata) {
514 dev_err(&pdev->dev, "platform data is NULL\n"); 551 dev_err(&pdev->dev, "platform data is NULL\n");
@@ -598,6 +635,18 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
598 if (ret) 635 if (ret)
599 goto err_probe1; 636 goto err_probe1;
600 637
638 /*
639 * This device ID is actually a common AMBA ID as used on the
640 * AMBA PrimeCell bus. However it is not a PrimeCell.
641 */
642 for (pid = 0, i = 0; i < 4; i++)
643 pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
644 host->pid = pid;
645 dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
646 "revision %02x, config %02x\n",
647 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
648 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
649
601 host->bank = pdata->bank; 650 host->bank = pdata->bank;
602 host->select_chip = pdata->select_bank; 651 host->select_chip = pdata->select_bank;
603 regs = host->regs_va; 652 regs = host->regs_va;
@@ -625,7 +674,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
625 674
626 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16); 675 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16);
627 676
628 if (get_fsmc_version(host->regs_va) == FSMC_VER8) { 677 if (AMBA_REV_BITS(host->pid) >= 8) {
629 nand->ecc.read_page = fsmc_read_page_hwecc; 678 nand->ecc.read_page = fsmc_read_page_hwecc;
630 nand->ecc.calculate = fsmc_read_hwecc_ecc4; 679 nand->ecc.calculate = fsmc_read_hwecc_ecc4;
631 nand->ecc.correct = fsmc_correct_data; 680 nand->ecc.correct = fsmc_correct_data;
@@ -645,7 +694,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
645 goto err_probe; 694 goto err_probe;
646 } 695 }
647 696
648 if (get_fsmc_version(host->regs_va) == FSMC_VER8) { 697 if (AMBA_REV_BITS(host->pid) >= 8) {
649 if (host->mtd.writesize == 512) { 698 if (host->mtd.writesize == 512) {
650 nand->ecc.layout = &fsmc_ecc4_sp_layout; 699 nand->ecc.layout = &fsmc_ecc4_sp_layout;
651 host->ecc_place = &fsmc_ecc4_sp_place; 700 host->ecc_place = &fsmc_ecc4_sp_place;
@@ -676,11 +725,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
676 * Check if partition info passed via command line 725 * Check if partition info passed via command line
677 */ 726 */
678 host->mtd.name = "nand"; 727 host->mtd.name = "nand";
679 nr_parts = parse_mtd_partitions(&host->mtd, part_probes, 728 host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
680 &host->partitions, 0); 729 &host->partitions, 0);
681 if (nr_parts > 0) { 730 if (host->nr_partitions <= 0) {
682 host->nr_partitions = nr_parts;
683 } else {
684#endif 731#endif
685 /* 732 /*
686 * Check if partition info passed via command line 733 * Check if partition info passed via command line
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 67343fc31bd5..cea38a5d4ac5 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,58 +251,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
251 return 0; 251 return 0;
252} 252}
253 253
254
255/* Copy paste of nand_read_page_hwecc_oob_first except for different eccpos
256 * handling. The ecc area is for 4k chips 72 bytes long and thus does not fit
257 * into the eccpos array. */
258static int jz_nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
259 struct nand_chip *chip, uint8_t *buf, int page)
260{
261 int i, eccsize = chip->ecc.size;
262 int eccbytes = chip->ecc.bytes;
263 int eccsteps = chip->ecc.steps;
264 uint8_t *p = buf;
265 unsigned int ecc_offset = chip->page_shift;
266
267 /* Read the OOB area first */
268 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
269 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
270 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
271
272 for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
273 int stat;
274
275 chip->ecc.hwctl(mtd, NAND_ECC_READ);
276 chip->read_buf(mtd, p, eccsize);
277
278 stat = chip->ecc.correct(mtd, p, &chip->oob_poi[i], NULL);
279 if (stat < 0)
280 mtd->ecc_stats.failed++;
281 else
282 mtd->ecc_stats.corrected += stat;
283 }
284 return 0;
285}
286
287/* Copy-and-paste of nand_write_page_hwecc with different eccpos handling. */
288static void jz_nand_write_page_hwecc(struct mtd_info *mtd,
289 struct nand_chip *chip, const uint8_t *buf)
290{
291 int i, eccsize = chip->ecc.size;
292 int eccbytes = chip->ecc.bytes;
293 int eccsteps = chip->ecc.steps;
294 const uint8_t *p = buf;
295 unsigned int ecc_offset = chip->page_shift;
296
297 for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
298 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
299 chip->write_buf(mtd, p, eccsize);
300 chip->ecc.calculate(mtd, p, &chip->oob_poi[i]);
301 }
302
303 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
304}
305
306#ifdef CONFIG_MTD_CMDLINE_PARTS 254#ifdef CONFIG_MTD_CMDLINE_PARTS
307static const char *part_probes[] = {"cmdline", NULL}; 255static const char *part_probes[] = {"cmdline", NULL};
308#endif 256#endif
@@ -393,9 +341,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
393 chip->ecc.size = 512; 341 chip->ecc.size = 512;
394 chip->ecc.bytes = 9; 342 chip->ecc.bytes = 9;
395 343
396 chip->ecc.read_page = jz_nand_read_page_hwecc_oob_first;
397 chip->ecc.write_page = jz_nand_write_page_hwecc;
398
399 if (pdata) 344 if (pdata)
400 chip->ecc.layout = pdata->ecc_layout; 345 chip->ecc.layout = pdata->ecc_layout;
401 346
@@ -489,7 +434,7 @@ static int __devexit jz_nand_remove(struct platform_device *pdev)
489 return 0; 434 return 0;
490} 435}
491 436
492struct platform_driver jz_nand_driver = { 437static struct platform_driver jz_nand_driver = {
493 .probe = jz_nand_probe, 438 .probe = jz_nand_probe,
494 .remove = __devexit_p(jz_nand_remove), 439 .remove = __devexit_p(jz_nand_remove),
495 .driver = { 440 .driver = {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 214b03afdd48..ef932ba55a0b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1009,7 +1009,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1009 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; 1009 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
1010 struct mxc_nand_host *host; 1010 struct mxc_nand_host *host;
1011 struct resource *res; 1011 struct resource *res;
1012 int err = 0, nr_parts = 0; 1012 int err = 0, __maybe_unused nr_parts = 0;
1013 struct nand_ecclayout *oob_smallpage, *oob_largepage; 1013 struct nand_ecclayout *oob_smallpage, *oob_largepage;
1014 1014
1015 /* Allocate memory for MTD device structure and private data */ 1015 /* Allocate memory for MTD device structure and private data */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 31bf376b82a0..a9c6ce745767 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2865,20 +2865,24 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2865 2865
2866 /* check version */ 2866 /* check version */
2867 val = le16_to_cpu(p->revision); 2867 val = le16_to_cpu(p->revision);
2868 if (val == 1 || val > (1 << 4)) { 2868 if (val & (1 << 5))
2869 printk(KERN_INFO "%s: unsupported ONFI version: %d\n", 2869 chip->onfi_version = 23;
2870 __func__, val); 2870 else if (val & (1 << 4))
2871 return 0;
2872 }
2873
2874 if (val & (1 << 4))
2875 chip->onfi_version = 22; 2871 chip->onfi_version = 22;
2876 else if (val & (1 << 3)) 2872 else if (val & (1 << 3))
2877 chip->onfi_version = 21; 2873 chip->onfi_version = 21;
2878 else if (val & (1 << 2)) 2874 else if (val & (1 << 2))
2879 chip->onfi_version = 20; 2875 chip->onfi_version = 20;
2880 else 2876 else if (val & (1 << 1))
2881 chip->onfi_version = 10; 2877 chip->onfi_version = 10;
2878 else
2879 chip->onfi_version = 0;
2880
2881 if (!chip->onfi_version) {
2882 printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
2883 __func__, val);
2884 return 0;
2885 }
2882 2886
2883 sanitize_string(p->manufacturer, sizeof(p->manufacturer)); 2887 sanitize_string(p->manufacturer, sizeof(p->manufacturer));
2884 sanitize_string(p->model, sizeof(p->model)); 2888 sanitize_string(p->model, sizeof(p->model));
@@ -2887,7 +2891,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2887 mtd->writesize = le32_to_cpu(p->byte_per_page); 2891 mtd->writesize = le32_to_cpu(p->byte_per_page);
2888 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; 2892 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
2889 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); 2893 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
2890 chip->chipsize = le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; 2894 chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
2891 busw = 0; 2895 busw = 0;
2892 if (le16_to_cpu(p->features) & 1) 2896 if (le16_to_cpu(p->features) & 1)
2893 busw = NAND_BUSWIDTH_16; 2897 busw = NAND_BUSWIDTH_16;
@@ -3157,7 +3161,7 @@ ident_done:
3157 printk(KERN_INFO "NAND device: Manufacturer ID:" 3161 printk(KERN_INFO "NAND device: Manufacturer ID:"
3158 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, 3162 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
3159 nand_manuf_ids[maf_idx].name, 3163 nand_manuf_ids[maf_idx].name,
3160 chip->onfi_version ? type->name : chip->onfi_params.model); 3164 chip->onfi_version ? chip->onfi_params.model : type->name);
3161 3165
3162 return type; 3166 return type;
3163} 3167}
@@ -3435,6 +3439,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3435 mtd->resume = nand_resume; 3439 mtd->resume = nand_resume;
3436 mtd->block_isbad = nand_block_isbad; 3440 mtd->block_isbad = nand_block_isbad;
3437 mtd->block_markbad = nand_block_markbad; 3441 mtd->block_markbad = nand_block_markbad;
3442 mtd->writebufsize = mtd->writesize;
3438 3443
3439 /* propagate ecc.layout to mtd_info */ 3444 /* propagate ecc.layout to mtd_info */
3440 mtd->ecclayout = chip->ecc.layout; 3445 mtd->ecclayout = chip->ecc.layout;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 586b981f0e61..6ebd869993aa 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1092,7 +1092,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
1092 1092
1093/** 1093/**
1094 * verify_bbt_descr - verify the bad block description 1094 * verify_bbt_descr - verify the bad block description
1095 * @bd: the table to verify 1095 * @mtd: MTD device structure
1096 * @bd: the table to verify
1096 * 1097 *
1097 * This functions performs a few sanity checks on the bad block description 1098 * This functions performs a few sanity checks on the bad block description
1098 * table. 1099 * table.
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a6a73aab1253..a5aa99f014ba 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -210,12 +210,12 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
210#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */ 210#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
211#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */ 211#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
212#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */ 212#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
213#define STATE_CMD_PAGEPROG 0x00000004 /* start page programm */ 213#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
214#define STATE_CMD_READOOB 0x00000005 /* read OOB area */ 214#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
215#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */ 215#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
216#define STATE_CMD_STATUS 0x00000007 /* read status */ 216#define STATE_CMD_STATUS 0x00000007 /* read status */
217#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */ 217#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
218#define STATE_CMD_SEQIN 0x00000009 /* sequential data imput */ 218#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
219#define STATE_CMD_READID 0x0000000A /* read ID */ 219#define STATE_CMD_READID 0x0000000A /* read ID */
220#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */ 220#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
221#define STATE_CMD_RESET 0x0000000C /* reset */ 221#define STATE_CMD_RESET 0x0000000C /* reset */
@@ -230,7 +230,7 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
230#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */ 230#define STATE_ADDR_ZERO 0x00000040 /* one byte zero address was accepted */
231#define STATE_ADDR_MASK 0x00000070 /* address states mask */ 231#define STATE_ADDR_MASK 0x00000070 /* address states mask */
232 232
233/* Durind data input/output the simulator is in these states */ 233/* During data input/output the simulator is in these states */
234#define STATE_DATAIN 0x00000100 /* waiting for data input */ 234#define STATE_DATAIN 0x00000100 /* waiting for data input */
235#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */ 235#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
236 236
@@ -248,7 +248,7 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
248 248
249/* Simulator's actions bit masks */ 249/* Simulator's actions bit masks */
250#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */ 250#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
251#define ACTION_PRGPAGE 0x00200000 /* programm the internal buffer to flash */ 251#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
252#define ACTION_SECERASE 0x00300000 /* erase sector */ 252#define ACTION_SECERASE 0x00300000 /* erase sector */
253#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */ 253#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
254#define ACTION_HALFOFF 0x00500000 /* add to address half of page */ 254#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
@@ -263,18 +263,18 @@ MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in d
263#define OPT_PAGE512 0x00000002 /* 512-byte page chips */ 263#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
264#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ 264#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
265#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ 265#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
266#define OPT_AUTOINCR 0x00000020 /* page number auto inctimentation is possible */ 266#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
267#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ 267#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
268#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ 268#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
269#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ 269#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
270#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */ 270#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
271 271
272/* Remove action bits ftom state */ 272/* Remove action bits from state */
273#define NS_STATE(x) ((x) & ~ACTION_MASK) 273#define NS_STATE(x) ((x) & ~ACTION_MASK)
274 274
275/* 275/*
276 * Maximum previous states which need to be saved. Currently saving is 276 * Maximum previous states which need to be saved. Currently saving is
277 * only needed for page programm operation with preceeded read command 277 * only needed for page program operation with preceded read command
278 * (which is only valid for 512-byte pages). 278 * (which is only valid for 512-byte pages).
279 */ 279 */
280#define NS_MAX_PREVSTATES 1 280#define NS_MAX_PREVSTATES 1
@@ -380,16 +380,16 @@ static struct nandsim_operations {
380 /* Read OOB */ 380 /* Read OOB */
381 {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY, 381 {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
382 STATE_DATAOUT, STATE_READY}}, 382 STATE_DATAOUT, STATE_READY}},
383 /* Programm page starting from the beginning */ 383 /* Program page starting from the beginning */
384 {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN, 384 {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
385 STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}}, 385 STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
386 /* Programm page starting from the beginning */ 386 /* Program page starting from the beginning */
387 {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE, 387 {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
388 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}}, 388 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
389 /* Programm page starting from the second half */ 389 /* Program page starting from the second half */
390 {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE, 390 {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
391 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}}, 391 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
392 /* Programm OOB */ 392 /* Program OOB */
393 {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE, 393 {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
394 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}}, 394 STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
395 /* Erase sector */ 395 /* Erase sector */
@@ -470,7 +470,7 @@ static int alloc_device(struct nandsim *ns)
470 err = -EINVAL; 470 err = -EINVAL;
471 goto err_close; 471 goto err_close;
472 } 472 }
473 ns->pages_written = vmalloc(ns->geom.pgnum); 473 ns->pages_written = vzalloc(ns->geom.pgnum);
474 if (!ns->pages_written) { 474 if (!ns->pages_written) {
475 NS_ERR("alloc_device: unable to allocate pages written array\n"); 475 NS_ERR("alloc_device: unable to allocate pages written array\n");
476 err = -ENOMEM; 476 err = -ENOMEM;
@@ -483,7 +483,6 @@ static int alloc_device(struct nandsim *ns)
483 goto err_free; 483 goto err_free;
484 } 484 }
485 ns->cfile = cfile; 485 ns->cfile = cfile;
486 memset(ns->pages_written, 0, ns->geom.pgnum);
487 return 0; 486 return 0;
488 } 487 }
489 488
@@ -1171,9 +1170,9 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
1171 * of supported operations. 1170 * of supported operations.
1172 * 1171 *
1173 * Operation can be unknown because of the following. 1172 * Operation can be unknown because of the following.
1174 * 1. New command was accepted and this is the firs call to find the 1173 * 1. New command was accepted and this is the first call to find the
1175 * correspondent states chain. In this case ns->npstates = 0; 1174 * correspondent states chain. In this case ns->npstates = 0;
1176 * 2. There is several operations which begin with the same command(s) 1175 * 2. There are several operations which begin with the same command(s)
1177 * (for example program from the second half and read from the 1176 * (for example program from the second half and read from the
1178 * second half operations both begin with the READ1 command). In this 1177 * second half operations both begin with the READ1 command). In this
1179 * case the ns->pstates[] array contains previous states. 1178 * case the ns->pstates[] array contains previous states.
@@ -1186,7 +1185,7 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
1186 * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is 1185 * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is
1187 * zeroed). 1186 * zeroed).
1188 * 1187 *
1189 * If there are several maches, the current state is pushed to the 1188 * If there are several matches, the current state is pushed to the
1190 * ns->pstates. 1189 * ns->pstates.
1191 * 1190 *
1192 * The operation can be unknown only while commands are input to the chip. 1191 * The operation can be unknown only while commands are input to the chip.
@@ -1195,10 +1194,10 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
1195 * operation is searched using the following pattern: 1194 * operation is searched using the following pattern:
1196 * ns->pstates[0], ... ns->pstates[ns->npstates], <address input> 1195 * ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
1197 * 1196 *
1198 * It is supposed that this pattern must either match one operation on 1197 * It is supposed that this pattern must either match one operation or
1199 * none. There can't be ambiguity in that case. 1198 * none. There can't be ambiguity in that case.
1200 * 1199 *
1201 * If no matches found, the functions does the following: 1200 * If no matches found, the function does the following:
1202 * 1. if there are saved states present, try to ignore them and search 1201 * 1. if there are saved states present, try to ignore them and search
1203 * again only using the last command. If nothing was found, switch 1202 * again only using the last command. If nothing was found, switch
1204 * to the STATE_READY state. 1203 * to the STATE_READY state.
@@ -1668,7 +1667,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
1668 1667
1669 case ACTION_PRGPAGE: 1668 case ACTION_PRGPAGE:
1670 /* 1669 /*
1671 * Programm page - move internal buffer data to the page. 1670 * Program page - move internal buffer data to the page.
1672 */ 1671 */
1673 1672
1674 if (ns->lines.wp) { 1673 if (ns->lines.wp) {
@@ -1933,7 +1932,7 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
1933 NS_DBG("read_byte: all bytes were read\n"); 1932 NS_DBG("read_byte: all bytes were read\n");
1934 1933
1935 /* 1934 /*
1936 * The OPT_AUTOINCR allows to read next conseqitive pages without 1935 * The OPT_AUTOINCR allows to read next consecutive pages without
1937 * new read operation cycle. 1936 * new read operation cycle.
1938 */ 1937 */
1939 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) { 1938 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 6ddb2461d740..bb277a54986f 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -107,7 +107,7 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
107 if (pasemi_nand_mtd) 107 if (pasemi_nand_mtd)
108 return -ENODEV; 108 return -ENODEV;
109 109
110 pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end); 110 pr_debug("pasemi_nand at %pR\n", &res);
111 111
112 /* Allocate memory for MTD device structure and private data */ 112 /* Allocate memory for MTD device structure and private data */
113 pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) + 113 pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 17f8518cc5eb..ea2c288df3f6 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -885,6 +885,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
885 /* set info fields needed to __readid */ 885 /* set info fields needed to __readid */
886 info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; 886 info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
887 info->reg_ndcr = ndcr; 887 info->reg_ndcr = ndcr;
888 info->cmdset = &default_cmdset;
888 889
889 if (__readid(info, &id)) 890 if (__readid(info, &id))
890 return -ENODEV; 891 return -ENODEV;
@@ -915,7 +916,6 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
915 916
916 info->ndtr0cs0 = nand_readl(info, NDTR0CS0); 917 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
917 info->ndtr1cs0 = nand_readl(info, NDTR1CS0); 918 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
918 info->cmdset = &default_cmdset;
919 919
920 return 0; 920 return 0;
921} 921}
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 054a41c0ef4a..ca270a4881a4 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -277,8 +277,9 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
277 ret = nand_scan_ident(mtd, 1, NULL); 277 ret = nand_scan_ident(mtd, 1, NULL);
278 if (!ret) { 278 if (!ret) {
279 if (mtd->writesize >= 512) { 279 if (mtd->writesize >= 512) {
280 chip->ecc.size = mtd->writesize; 280 /* Hardware ECC 6 byte ECC per 512 Byte data */
281 chip->ecc.bytes = 3 * (mtd->writesize / 256); 281 chip->ecc.size = 512;
282 chip->ecc.bytes = 6;
282 } 283 }
283 ret = nand_scan_tail(mtd); 284 ret = nand_scan_tail(mtd);
284 } 285 }
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index d0894ca7798b..ac31f461cc1c 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -35,6 +35,7 @@
35#include <linux/dma-mapping.h> 35#include <linux/dma-mapping.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/regulator/consumer.h>
38 39
39#include <asm/mach/flash.h> 40#include <asm/mach/flash.h>
40#include <plat/gpmc.h> 41#include <plat/gpmc.h>
@@ -63,8 +64,13 @@ struct omap2_onenand {
63 int dma_channel; 64 int dma_channel;
64 int freq; 65 int freq;
65 int (*setup)(void __iomem *base, int freq); 66 int (*setup)(void __iomem *base, int freq);
67 struct regulator *regulator;
66}; 68};
67 69
70#ifdef CONFIG_MTD_PARTITIONS
71static const char *part_probes[] = { "cmdlinepart", NULL, };
72#endif
73
68static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) 74static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
69{ 75{
70 struct omap2_onenand *c = data; 76 struct omap2_onenand *c = data;
@@ -108,8 +114,9 @@ static void wait_warn(char *msg, int state, unsigned int ctrl,
108static int omap2_onenand_wait(struct mtd_info *mtd, int state) 114static int omap2_onenand_wait(struct mtd_info *mtd, int state)
109{ 115{
110 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); 116 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
117 struct onenand_chip *this = mtd->priv;
111 unsigned int intr = 0; 118 unsigned int intr = 0;
112 unsigned int ctrl; 119 unsigned int ctrl, ctrl_mask;
113 unsigned long timeout; 120 unsigned long timeout;
114 u32 syscfg; 121 u32 syscfg;
115 122
@@ -180,7 +187,8 @@ retry:
180 if (result == 0) { 187 if (result == 0) {
181 /* Timeout after 20ms */ 188 /* Timeout after 20ms */
182 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); 189 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
183 if (ctrl & ONENAND_CTRL_ONGO) { 190 if (ctrl & ONENAND_CTRL_ONGO &&
191 !this->ongoing) {
184 /* 192 /*
185 * The operation seems to be still going 193 * The operation seems to be still going
186 * so give it some more time. 194 * so give it some more time.
@@ -269,7 +277,11 @@ retry:
269 return -EIO; 277 return -EIO;
270 } 278 }
271 279
272 if (ctrl & 0xFE9F) 280 ctrl_mask = 0xFE9F;
281 if (this->ongoing)
282 ctrl_mask &= ~0x8000;
283
284 if (ctrl & ctrl_mask)
273 wait_warn("unexpected controller status", state, ctrl, intr); 285 wait_warn("unexpected controller status", state, ctrl, intr);
274 286
275 return 0; 287 return 0;
@@ -591,6 +603,30 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
591 memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE); 603 memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
592} 604}
593 605
606static int omap2_onenand_enable(struct mtd_info *mtd)
607{
608 int ret;
609 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
610
611 ret = regulator_enable(c->regulator);
612 if (ret != 0)
613 dev_err(&c->pdev->dev, "cant enable regulator\n");
614
615 return ret;
616}
617
618static int omap2_onenand_disable(struct mtd_info *mtd)
619{
620 int ret;
621 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
622
623 ret = regulator_disable(c->regulator);
624 if (ret != 0)
625 dev_err(&c->pdev->dev, "cant disable regulator\n");
626
627 return ret;
628}
629
594static int __devinit omap2_onenand_probe(struct platform_device *pdev) 630static int __devinit omap2_onenand_probe(struct platform_device *pdev)
595{ 631{
596 struct omap_onenand_platform_data *pdata; 632 struct omap_onenand_platform_data *pdata;
@@ -705,8 +741,18 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
705 } 741 }
706 } 742 }
707 743
744 if (pdata->regulator_can_sleep) {
745 c->regulator = regulator_get(&pdev->dev, "vonenand");
746 if (IS_ERR(c->regulator)) {
747 dev_err(&pdev->dev, "Failed to get regulator\n");
748 goto err_release_dma;
749 }
750 c->onenand.enable = omap2_onenand_enable;
751 c->onenand.disable = omap2_onenand_disable;
752 }
753
708 if ((r = onenand_scan(&c->mtd, 1)) < 0) 754 if ((r = onenand_scan(&c->mtd, 1)) < 0)
709 goto err_release_dma; 755 goto err_release_regulator;
710 756
711 switch ((c->onenand.version_id >> 4) & 0xf) { 757 switch ((c->onenand.version_id >> 4) & 0xf) {
712 case 0: 758 case 0:
@@ -727,13 +773,15 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
727 } 773 }
728 774
729#ifdef CONFIG_MTD_PARTITIONS 775#ifdef CONFIG_MTD_PARTITIONS
730 if (pdata->parts != NULL) 776 r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
731 r = add_mtd_partitions(&c->mtd, pdata->parts, 777 if (r > 0)
732 pdata->nr_parts); 778 r = add_mtd_partitions(&c->mtd, c->parts, r);
779 else if (pdata->parts != NULL)
780 r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
733 else 781 else
734#endif 782#endif
735 r = add_mtd_device(&c->mtd); 783 r = add_mtd_device(&c->mtd);
736 if (r < 0) 784 if (r)
737 goto err_release_onenand; 785 goto err_release_onenand;
738 786
739 platform_set_drvdata(pdev, c); 787 platform_set_drvdata(pdev, c);
@@ -742,6 +790,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
742 790
743err_release_onenand: 791err_release_onenand:
744 onenand_release(&c->mtd); 792 onenand_release(&c->mtd);
793err_release_regulator:
794 regulator_put(c->regulator);
745err_release_dma: 795err_release_dma:
746 if (c->dma_channel != -1) 796 if (c->dma_channel != -1)
747 omap_free_dma(c->dma_channel); 797 omap_free_dma(c->dma_channel);
@@ -757,6 +807,7 @@ err_release_mem_region:
757err_free_cs: 807err_free_cs:
758 gpmc_cs_free(c->gpmc_cs); 808 gpmc_cs_free(c->gpmc_cs);
759err_kfree: 809err_kfree:
810 kfree(c->parts);
760 kfree(c); 811 kfree(c);
761 812
762 return r; 813 return r;
@@ -766,18 +817,8 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
766{ 817{
767 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); 818 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
768 819
769 BUG_ON(c == NULL);
770
771#ifdef CONFIG_MTD_PARTITIONS
772 if (c->parts)
773 del_mtd_partitions(&c->mtd);
774 else
775 del_mtd_device(&c->mtd);
776#else
777 del_mtd_device(&c->mtd);
778#endif
779
780 onenand_release(&c->mtd); 820 onenand_release(&c->mtd);
821 regulator_put(c->regulator);
781 if (c->dma_channel != -1) 822 if (c->dma_channel != -1)
782 omap_free_dma(c->dma_channel); 823 omap_free_dma(c->dma_channel);
783 omap2_onenand_shutdown(pdev); 824 omap2_onenand_shutdown(pdev);
@@ -789,6 +830,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
789 iounmap(c->onenand.base); 830 iounmap(c->onenand.base);
790 release_mem_region(c->phys_base, ONENAND_IO_SIZE); 831 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
791 gpmc_cs_free(c->gpmc_cs); 832 gpmc_cs_free(c->gpmc_cs);
833 kfree(c->parts);
792 kfree(c); 834 kfree(c);
793 835
794 return 0; 836 return 0;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 6b3a875647c9..bac41caa8df7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -400,8 +400,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
400 value = onenand_bufferram_address(this, block); 400 value = onenand_bufferram_address(this, block);
401 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 401 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
402 402
403 if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) || 403 if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this))
404 ONENAND_IS_4KB_PAGE(this))
405 /* It is always BufferRAM0 */ 404 /* It is always BufferRAM0 */
406 ONENAND_SET_BUFFERRAM0(this); 405 ONENAND_SET_BUFFERRAM0(this);
407 else 406 else
@@ -430,7 +429,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
430 case FLEXONENAND_CMD_RECOVER_LSB: 429 case FLEXONENAND_CMD_RECOVER_LSB:
431 case ONENAND_CMD_READ: 430 case ONENAND_CMD_READ:
432 case ONENAND_CMD_READOOB: 431 case ONENAND_CMD_READOOB:
433 if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) 432 if (ONENAND_IS_4KB_PAGE(this))
434 /* It is always BufferRAM0 */ 433 /* It is always BufferRAM0 */
435 dataram = ONENAND_SET_BUFFERRAM0(this); 434 dataram = ONENAND_SET_BUFFERRAM0(this);
436 else 435 else
@@ -949,6 +948,8 @@ static int onenand_get_device(struct mtd_info *mtd, int new_state)
949 if (this->state == FL_READY) { 948 if (this->state == FL_READY) {
950 this->state = new_state; 949 this->state = new_state;
951 spin_unlock(&this->chip_lock); 950 spin_unlock(&this->chip_lock);
951 if (new_state != FL_PM_SUSPENDED && this->enable)
952 this->enable(mtd);
952 break; 953 break;
953 } 954 }
954 if (new_state == FL_PM_SUSPENDED) { 955 if (new_state == FL_PM_SUSPENDED) {
@@ -975,6 +976,8 @@ static void onenand_release_device(struct mtd_info *mtd)
975{ 976{
976 struct onenand_chip *this = mtd->priv; 977 struct onenand_chip *this = mtd->priv;
977 978
979 if (this->state != FL_PM_SUSPENDED && this->disable)
980 this->disable(mtd);
978 /* Release the chip */ 981 /* Release the chip */
979 spin_lock(&this->chip_lock); 982 spin_lock(&this->chip_lock);
980 this->state = FL_READY; 983 this->state = FL_READY;
@@ -1353,7 +1356,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
1353 1356
1354 stats = mtd->ecc_stats; 1357 stats = mtd->ecc_stats;
1355 1358
1356 readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB; 1359 readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1357 1360
1358 while (read < len) { 1361 while (read < len) {
1359 cond_resched(); 1362 cond_resched();
@@ -1429,7 +1432,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
1429 int ret; 1432 int ret;
1430 1433
1431 onenand_get_device(mtd, FL_READING); 1434 onenand_get_device(mtd, FL_READING);
1432 ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? 1435 ret = ONENAND_IS_4KB_PAGE(this) ?
1433 onenand_mlc_read_ops_nolock(mtd, from, &ops) : 1436 onenand_mlc_read_ops_nolock(mtd, from, &ops) :
1434 onenand_read_ops_nolock(mtd, from, &ops); 1437 onenand_read_ops_nolock(mtd, from, &ops);
1435 onenand_release_device(mtd); 1438 onenand_release_device(mtd);
@@ -1464,7 +1467,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
1464 1467
1465 onenand_get_device(mtd, FL_READING); 1468 onenand_get_device(mtd, FL_READING);
1466 if (ops->datbuf) 1469 if (ops->datbuf)
1467 ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? 1470 ret = ONENAND_IS_4KB_PAGE(this) ?
1468 onenand_mlc_read_ops_nolock(mtd, from, ops) : 1471 onenand_mlc_read_ops_nolock(mtd, from, ops) :
1469 onenand_read_ops_nolock(mtd, from, ops); 1472 onenand_read_ops_nolock(mtd, from, ops);
1470 else 1473 else
@@ -1485,8 +1488,7 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
1485{ 1488{
1486 struct onenand_chip *this = mtd->priv; 1489 struct onenand_chip *this = mtd->priv;
1487 unsigned long timeout; 1490 unsigned long timeout;
1488 unsigned int interrupt; 1491 unsigned int interrupt, ctrl, ecc, addr1, addr8;
1489 unsigned int ctrl;
1490 1492
1491 /* The 20 msec is enough */ 1493 /* The 20 msec is enough */
1492 timeout = jiffies + msecs_to_jiffies(20); 1494 timeout = jiffies + msecs_to_jiffies(20);
@@ -1498,25 +1500,28 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
1498 /* To get correct interrupt status in timeout case */ 1500 /* To get correct interrupt status in timeout case */
1499 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); 1501 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
1500 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); 1502 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
1503 addr1 = this->read_word(this->base + ONENAND_REG_START_ADDRESS1);
1504 addr8 = this->read_word(this->base + ONENAND_REG_START_ADDRESS8);
1501 1505
1502 if (interrupt & ONENAND_INT_READ) { 1506 if (interrupt & ONENAND_INT_READ) {
1503 int ecc = onenand_read_ecc(this); 1507 ecc = onenand_read_ecc(this);
1504 if (ecc & ONENAND_ECC_2BIT_ALL) { 1508 if (ecc & ONENAND_ECC_2BIT_ALL) {
1505 printk(KERN_WARNING "%s: ecc error = 0x%04x, " 1509 printk(KERN_DEBUG "%s: ecc 0x%04x ctrl 0x%04x "
1506 "controller error 0x%04x\n", 1510 "intr 0x%04x addr1 %#x addr8 %#x\n",
1507 __func__, ecc, ctrl); 1511 __func__, ecc, ctrl, interrupt, addr1, addr8);
1508 return ONENAND_BBT_READ_ECC_ERROR; 1512 return ONENAND_BBT_READ_ECC_ERROR;
1509 } 1513 }
1510 } else { 1514 } else {
1511 printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n", 1515 printk(KERN_ERR "%s: read timeout! ctrl 0x%04x "
1512 __func__, ctrl, interrupt); 1516 "intr 0x%04x addr1 %#x addr8 %#x\n",
1517 __func__, ctrl, interrupt, addr1, addr8);
1513 return ONENAND_BBT_READ_FATAL_ERROR; 1518 return ONENAND_BBT_READ_FATAL_ERROR;
1514 } 1519 }
1515 1520
1516 /* Initial bad block case: 0x2400 or 0x0400 */ 1521 /* Initial bad block case: 0x2400 or 0x0400 */
1517 if (ctrl & ONENAND_CTRL_ERROR) { 1522 if (ctrl & ONENAND_CTRL_ERROR) {
1518 printk(KERN_DEBUG "%s: controller error = 0x%04x\n", 1523 printk(KERN_DEBUG "%s: ctrl 0x%04x intr 0x%04x addr1 %#x "
1519 __func__, ctrl); 1524 "addr8 %#x\n", __func__, ctrl, interrupt, addr1, addr8);
1520 return ONENAND_BBT_READ_ERROR; 1525 return ONENAND_BBT_READ_ERROR;
1521 } 1526 }
1522 1527
@@ -1558,7 +1563,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
1558 1563
1559 column = from & (mtd->oobsize - 1); 1564 column = from & (mtd->oobsize - 1);
1560 1565
1561 readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB; 1566 readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1562 1567
1563 while (read < len) { 1568 while (read < len) {
1564 cond_resched(); 1569 cond_resched();
@@ -1612,7 +1617,7 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
1612 u_char *oob_buf = this->oob_buf; 1617 u_char *oob_buf = this->oob_buf;
1613 int status, i, readcmd; 1618 int status, i, readcmd;
1614 1619
1615 readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB; 1620 readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
1616 1621
1617 this->command(mtd, readcmd, to, mtd->oobsize); 1622 this->command(mtd, readcmd, to, mtd->oobsize);
1618 onenand_update_bufferram(mtd, to, 0); 1623 onenand_update_bufferram(mtd, to, 0);
@@ -1845,7 +1850,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1845 const u_char *buf = ops->datbuf; 1850 const u_char *buf = ops->datbuf;
1846 const u_char *oob = ops->oobbuf; 1851 const u_char *oob = ops->oobbuf;
1847 u_char *oobbuf; 1852 u_char *oobbuf;
1848 int ret = 0; 1853 int ret = 0, cmd;
1849 1854
1850 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", 1855 DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
1851 __func__, (unsigned int) to, (int) len); 1856 __func__, (unsigned int) to, (int) len);
@@ -1954,7 +1959,19 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1954 ONENAND_SET_NEXT_BUFFERRAM(this); 1959 ONENAND_SET_NEXT_BUFFERRAM(this);
1955 } 1960 }
1956 1961
1957 this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize); 1962 this->ongoing = 0;
1963 cmd = ONENAND_CMD_PROG;
1964
1965 /* Exclude 1st OTP and OTP blocks for cache program feature */
1966 if (ONENAND_IS_CACHE_PROGRAM(this) &&
1967 likely(onenand_block(this, to) != 0) &&
1968 ONENAND_IS_4KB_PAGE(this) &&
1969 ((written + thislen) < len)) {
1970 cmd = ONENAND_CMD_2X_CACHE_PROG;
1971 this->ongoing = 1;
1972 }
1973
1974 this->command(mtd, cmd, to, mtd->writesize);
1958 1975
1959 /* 1976 /*
1960 * 2 PLANE, MLC, and Flex-OneNAND wait here 1977 * 2 PLANE, MLC, and Flex-OneNAND wait here
@@ -2067,7 +2084,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2067 2084
2068 oobbuf = this->oob_buf; 2085 oobbuf = this->oob_buf;
2069 2086
2070 oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB; 2087 oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
2071 2088
2072 /* Loop until all data write */ 2089 /* Loop until all data write */
2073 while (written < len) { 2090 while (written < len) {
@@ -2086,7 +2103,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
2086 memcpy(oobbuf + column, buf, thislen); 2103 memcpy(oobbuf + column, buf, thislen);
2087 this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); 2104 this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
2088 2105
2089 if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) { 2106 if (ONENAND_IS_4KB_PAGE(this)) {
2090 /* Set main area of DataRAM to 0xff*/ 2107 /* Set main area of DataRAM to 0xff*/
2091 memset(this->page_buf, 0xff, mtd->writesize); 2108 memset(this->page_buf, 0xff, mtd->writesize);
2092 this->write_bufferram(mtd, ONENAND_DATARAM, 2109 this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -2481,7 +2498,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2481 /* Grab the lock and see if the device is available */ 2498 /* Grab the lock and see if the device is available */
2482 onenand_get_device(mtd, FL_ERASING); 2499 onenand_get_device(mtd, FL_ERASING);
2483 2500
2484 if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) { 2501 if (ONENAND_IS_4KB_PAGE(this) || region ||
2502 instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
2485 /* region is set for Flex-OneNAND (no mb erase) */ 2503 /* region is set for Flex-OneNAND (no mb erase) */
2486 ret = onenand_block_by_block_erase(mtd, instr, 2504 ret = onenand_block_by_block_erase(mtd, instr,
2487 region, block_size); 2505 region, block_size);
@@ -3029,7 +3047,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
3029 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); 3047 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
3030 this->wait(mtd, FL_OTPING); 3048 this->wait(mtd, FL_OTPING);
3031 3049
3032 ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? 3050 ret = ONENAND_IS_4KB_PAGE(this) ?
3033 onenand_mlc_read_ops_nolock(mtd, from, &ops) : 3051 onenand_mlc_read_ops_nolock(mtd, from, &ops) :
3034 onenand_read_ops_nolock(mtd, from, &ops); 3052 onenand_read_ops_nolock(mtd, from, &ops);
3035 3053
@@ -3377,8 +3395,10 @@ static void onenand_check_features(struct mtd_info *mtd)
3377 case ONENAND_DEVICE_DENSITY_4Gb: 3395 case ONENAND_DEVICE_DENSITY_4Gb:
3378 if (ONENAND_IS_DDP(this)) 3396 if (ONENAND_IS_DDP(this))
3379 this->options |= ONENAND_HAS_2PLANE; 3397 this->options |= ONENAND_HAS_2PLANE;
3380 else if (numbufs == 1) 3398 else if (numbufs == 1) {
3381 this->options |= ONENAND_HAS_4KB_PAGE; 3399 this->options |= ONENAND_HAS_4KB_PAGE;
3400 this->options |= ONENAND_HAS_CACHE_PROGRAM;
3401 }
3382 3402
3383 case ONENAND_DEVICE_DENSITY_2Gb: 3403 case ONENAND_DEVICE_DENSITY_2Gb:
3384 /* 2Gb DDP does not have 2 plane */ 3404 /* 2Gb DDP does not have 2 plane */
@@ -3399,7 +3419,11 @@ static void onenand_check_features(struct mtd_info *mtd)
3399 break; 3419 break;
3400 } 3420 }
3401 3421
3402 if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) 3422 /* The MLC has 4KiB pagesize. */
3423 if (ONENAND_IS_MLC(this))
3424 this->options |= ONENAND_HAS_4KB_PAGE;
3425
3426 if (ONENAND_IS_4KB_PAGE(this))
3403 this->options &= ~ONENAND_HAS_2PLANE; 3427 this->options &= ~ONENAND_HAS_2PLANE;
3404 3428
3405 if (FLEXONENAND(this)) { 3429 if (FLEXONENAND(this)) {
@@ -3415,6 +3439,8 @@ static void onenand_check_features(struct mtd_info *mtd)
3415 printk(KERN_DEBUG "Chip has 2 plane\n"); 3439 printk(KERN_DEBUG "Chip has 2 plane\n");
3416 if (this->options & ONENAND_HAS_4KB_PAGE) 3440 if (this->options & ONENAND_HAS_4KB_PAGE)
3417 printk(KERN_DEBUG "Chip has 4KiB pagesize\n"); 3441 printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
3442 if (this->options & ONENAND_HAS_CACHE_PROGRAM)
3443 printk(KERN_DEBUG "Chip has cache program feature\n");
3418} 3444}
3419 3445
3420/** 3446/**
@@ -3831,7 +3857,7 @@ static int onenand_probe(struct mtd_info *mtd)
3831 /* The data buffer size is equal to page size */ 3857 /* The data buffer size is equal to page size */
3832 mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE); 3858 mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
3833 /* We use the full BufferRAM */ 3859 /* We use the full BufferRAM */
3834 if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) 3860 if (ONENAND_IS_4KB_PAGE(this))
3835 mtd->writesize <<= 1; 3861 mtd->writesize <<= 1;
3836 3862
3837 mtd->oobsize = mtd->writesize >> 5; 3863 mtd->oobsize = mtd->writesize >> 5;
@@ -4054,6 +4080,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4054 mtd->block_isbad = onenand_block_isbad; 4080 mtd->block_isbad = onenand_block_isbad;
4055 mtd->block_markbad = onenand_block_markbad; 4081 mtd->block_markbad = onenand_block_markbad;
4056 mtd->owner = THIS_MODULE; 4082 mtd->owner = THIS_MODULE;
4083 mtd->writebufsize = mtd->writesize;
4057 4084
4058 /* Unlock whole block */ 4085 /* Unlock whole block */
4059 this->unlock_all(mtd); 4086 this->unlock_all(mtd);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 01ab5b3c453b..fc2c16a0fd1c 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -91,16 +91,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
91 for (j = 0; j < len; j++) { 91 for (j = 0; j < len; j++) {
92 /* No need to read pages fully, 92 /* No need to read pages fully,
93 * just read required OOB bytes */ 93 * just read required OOB bytes */
94 ret = onenand_bbt_read_oob(mtd, from + j * mtd->writesize + bd->offs, &ops); 94 ret = onenand_bbt_read_oob(mtd,
95 from + j * this->writesize + bd->offs, &ops);
95 96
96 /* If it is a initial bad block, just ignore it */ 97 /* If it is a initial bad block, just ignore it */
97 if (ret == ONENAND_BBT_READ_FATAL_ERROR) 98 if (ret == ONENAND_BBT_READ_FATAL_ERROR)
98 return -EIO; 99 return -EIO;
99 100
100 if (ret || check_short_pattern(&buf[j * scanlen], scanlen, mtd->writesize, bd)) { 101 if (ret || check_short_pattern(&buf[j * scanlen],
102 scanlen, this->writesize, bd)) {
101 bbm->bbt[i >> 3] |= 0x03 << (i & 0x6); 103 bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
102 printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n", 104 printk(KERN_INFO "OneNAND eraseblock %d is an "
103 i >> 1, (unsigned int) from); 105 "initial bad block\n", i >> 1);
104 mtd->ecc_stats.badblocks++; 106 mtd->ecc_stats.badblocks++;
105 break; 107 break;
106 } 108 }
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 0de7a05e6de0..a4c74a9ba430 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -651,7 +651,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
651 void __iomem *p; 651 void __iomem *p;
652 void *buf = (void *) buffer; 652 void *buf = (void *) buffer;
653 dma_addr_t dma_src, dma_dst; 653 dma_addr_t dma_src, dma_dst;
654 int err, page_dma = 0; 654 int err, ofs, page_dma = 0;
655 struct device *dev = &onenand->pdev->dev; 655 struct device *dev = &onenand->pdev->dev;
656 656
657 p = this->base + area; 657 p = this->base + area;
@@ -677,10 +677,13 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
677 if (!page) 677 if (!page)
678 goto normal; 678 goto normal;
679 679
680 /* Page offset */
681 ofs = ((size_t) buf & ~PAGE_MASK);
680 page_dma = 1; 682 page_dma = 1;
683
681 /* DMA routine */ 684 /* DMA routine */
682 dma_src = onenand->phys_base + (p - this->base); 685 dma_src = onenand->phys_base + (p - this->base);
683 dma_dst = dma_map_page(dev, page, 0, count, DMA_FROM_DEVICE); 686 dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
684 } else { 687 } else {
685 /* DMA routine */ 688 /* DMA routine */
686 dma_src = onenand->phys_base + (p - this->base); 689 dma_src = onenand->phys_base + (p - this->base);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fcdb7f65fe0b..0b8141fc5c26 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -425,12 +425,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
425 425
426 /* Read both LEB 0 and LEB 1 into memory */ 426 /* Read both LEB 0 and LEB 1 into memory */
427 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { 427 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
428 leb[seb->lnum] = vmalloc(ubi->vtbl_size); 428 leb[seb->lnum] = vzalloc(ubi->vtbl_size);
429 if (!leb[seb->lnum]) { 429 if (!leb[seb->lnum]) {
430 err = -ENOMEM; 430 err = -ENOMEM;
431 goto out_free; 431 goto out_free;
432 } 432 }
433 memset(leb[seb->lnum], 0, ubi->vtbl_size);
434 433
435 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, 434 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
436 ubi->vtbl_size); 435 ubi->vtbl_size);
@@ -516,10 +515,9 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
516 int i; 515 int i;
517 struct ubi_vtbl_record *vtbl; 516 struct ubi_vtbl_record *vtbl;
518 517
519 vtbl = vmalloc(ubi->vtbl_size); 518 vtbl = vzalloc(ubi->vtbl_size);
520 if (!vtbl) 519 if (!vtbl)
521 return ERR_PTR(-ENOMEM); 520 return ERR_PTR(-ENOMEM);
522 memset(vtbl, 0, ubi->vtbl_size);
523 521
524 for (i = 0; i < ubi->vtbl_slots; i++) 522 for (i = 0; i < ubi->vtbl_slots; i++)
525 memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE); 523 memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4c8bfc97fb4c..03823327db25 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2864,7 +2864,7 @@ config MLX4_CORE
2864 default n 2864 default n
2865 2865
2866config MLX4_DEBUG 2866config MLX4_DEBUG
2867 bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED) 2867 bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
2868 depends on MLX4_CORE 2868 depends on MLX4_CORE
2869 default y 2869 default y
2870 ---help--- 2870 ---help---
@@ -3389,8 +3389,7 @@ config NETCONSOLE
3389 3389
3390config NETCONSOLE_DYNAMIC 3390config NETCONSOLE_DYNAMIC
3391 bool "Dynamic reconfiguration of logging targets" 3391 bool "Dynamic reconfiguration of logging targets"
3392 depends on NETCONSOLE && SYSFS 3392 depends on NETCONSOLE && SYSFS && CONFIGFS_FS
3393 select CONFIGFS_FS
3394 help 3393 help
3395 This option enables the ability to dynamically reconfigure target 3394 This option enables the ability to dynamically reconfigure target
3396 parameters (interface, IP addresses, port numbers, MAC addresses) 3395 parameters (interface, IP addresses, port numbers, MAC addresses)
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d849cf25..aa07657744c3 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value)
854} 854}
855 855
856/** 856/**
857 * ks8695_get_settings - Get device-specific settings. 857 * ks8695_wan_get_settings - Get device-specific settings.
858 * @ndev: The network device to read settings from 858 * @ndev: The network device to read settings from
859 * @cmd: The ethtool structure to read into 859 * @cmd: The ethtool structure to read into
860 */ 860 */
861static int 861static int
862ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 862ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
863{ 863{
864 struct ks8695_priv *ksp = netdev_priv(ndev); 864 struct ks8695_priv *ksp = netdev_priv(ndev);
865 u32 ctrl; 865 u32 ctrl;
@@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
870 SUPPORTED_TP | SUPPORTED_MII); 870 SUPPORTED_TP | SUPPORTED_MII);
871 cmd->transceiver = XCVR_INTERNAL; 871 cmd->transceiver = XCVR_INTERNAL;
872 872
873 /* Port specific extras */ 873 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
874 switch (ksp->dtype) { 874 cmd->port = PORT_MII;
875 case KS8695_DTYPE_HPNA: 875 cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
876 cmd->phy_address = 0; 876 cmd->phy_address = 0;
877 /* not supported for HPNA */
878 cmd->autoneg = AUTONEG_DISABLE;
879 877
880 /* BUG: Erm, dtype hpna implies no phy regs */ 878 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
881 /* 879 if ((ctrl & WMC_WAND) == 0) {
882 ctrl = readl(KS8695_MISC_VA + KS8695_HMC); 880 /* auto-negotiation is enabled */
883 cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10; 881 cmd->advertising |= ADVERTISED_Autoneg;
884 cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF; 882 if (ctrl & WMC_WANA100F)
885 */ 883 cmd->advertising |= ADVERTISED_100baseT_Full;
886 return -EOPNOTSUPP; 884 if (ctrl & WMC_WANA100H)
887 case KS8695_DTYPE_WAN: 885 cmd->advertising |= ADVERTISED_100baseT_Half;
888 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; 886 if (ctrl & WMC_WANA10F)
889 cmd->port = PORT_MII; 887 cmd->advertising |= ADVERTISED_10baseT_Full;
890 cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); 888 if (ctrl & WMC_WANA10H)
891 cmd->phy_address = 0; 889 cmd->advertising |= ADVERTISED_10baseT_Half;
890 if (ctrl & WMC_WANAP)
891 cmd->advertising |= ADVERTISED_Pause;
892 cmd->autoneg = AUTONEG_ENABLE;
893
894 cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
895 cmd->duplex = (ctrl & WMC_WDS) ?
896 DUPLEX_FULL : DUPLEX_HALF;
897 } else {
898 /* auto-negotiation is disabled */
899 cmd->autoneg = AUTONEG_DISABLE;
892 900
893 ctrl = readl(ksp->phyiface_regs + KS8695_WMC); 901 cmd->speed = (ctrl & WMC_WANF100) ?
894 if ((ctrl & WMC_WAND) == 0) { 902 SPEED_100 : SPEED_10;
895 /* auto-negotiation is enabled */ 903 cmd->duplex = (ctrl & WMC_WANFF) ?
896 cmd->advertising |= ADVERTISED_Autoneg; 904 DUPLEX_FULL : DUPLEX_HALF;
897 if (ctrl & WMC_WANA100F)
898 cmd->advertising |= ADVERTISED_100baseT_Full;
899 if (ctrl & WMC_WANA100H)
900 cmd->advertising |= ADVERTISED_100baseT_Half;
901 if (ctrl & WMC_WANA10F)
902 cmd->advertising |= ADVERTISED_10baseT_Full;
903 if (ctrl & WMC_WANA10H)
904 cmd->advertising |= ADVERTISED_10baseT_Half;
905 if (ctrl & WMC_WANAP)
906 cmd->advertising |= ADVERTISED_Pause;
907 cmd->autoneg = AUTONEG_ENABLE;
908
909 cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
910 cmd->duplex = (ctrl & WMC_WDS) ?
911 DUPLEX_FULL : DUPLEX_HALF;
912 } else {
913 /* auto-negotiation is disabled */
914 cmd->autoneg = AUTONEG_DISABLE;
915
916 cmd->speed = (ctrl & WMC_WANF100) ?
917 SPEED_100 : SPEED_10;
918 cmd->duplex = (ctrl & WMC_WANFF) ?
919 DUPLEX_FULL : DUPLEX_HALF;
920 }
921 break;
922 case KS8695_DTYPE_LAN:
923 return -EOPNOTSUPP;
924 } 905 }
925 906
926 return 0; 907 return 0;
927} 908}
928 909
929/** 910/**
930 * ks8695_set_settings - Set device-specific settings. 911 * ks8695_wan_set_settings - Set device-specific settings.
931 * @ndev: The network device to configure 912 * @ndev: The network device to configure
932 * @cmd: The settings to configure 913 * @cmd: The settings to configure
933 */ 914 */
934static int 915static int
935ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 916ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
936{ 917{
937 struct ks8695_priv *ksp = netdev_priv(ndev); 918 struct ks8695_priv *ksp = netdev_priv(ndev);
938 u32 ctrl; 919 u32 ctrl;
@@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
956 ADVERTISED_100baseT_Full)) == 0) 937 ADVERTISED_100baseT_Full)) == 0)
957 return -EINVAL; 938 return -EINVAL;
958 939
959 switch (ksp->dtype) { 940 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
960 case KS8695_DTYPE_HPNA:
961 /* HPNA does not support auto-negotiation. */
962 return -EINVAL;
963 case KS8695_DTYPE_WAN:
964 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
965
966 ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
967 WMC_WANA10F | WMC_WANA10H);
968 if (cmd->advertising & ADVERTISED_100baseT_Full)
969 ctrl |= WMC_WANA100F;
970 if (cmd->advertising & ADVERTISED_100baseT_Half)
971 ctrl |= WMC_WANA100H;
972 if (cmd->advertising & ADVERTISED_10baseT_Full)
973 ctrl |= WMC_WANA10F;
974 if (cmd->advertising & ADVERTISED_10baseT_Half)
975 ctrl |= WMC_WANA10H;
976
977 /* force a re-negotiation */
978 ctrl |= WMC_WANR;
979 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
980 break;
981 case KS8695_DTYPE_LAN:
982 return -EOPNOTSUPP;
983 }
984 941
942 ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
943 WMC_WANA10F | WMC_WANA10H);
944 if (cmd->advertising & ADVERTISED_100baseT_Full)
945 ctrl |= WMC_WANA100F;
946 if (cmd->advertising & ADVERTISED_100baseT_Half)
947 ctrl |= WMC_WANA100H;
948 if (cmd->advertising & ADVERTISED_10baseT_Full)
949 ctrl |= WMC_WANA10F;
950 if (cmd->advertising & ADVERTISED_10baseT_Half)
951 ctrl |= WMC_WANA10H;
952
953 /* force a re-negotiation */
954 ctrl |= WMC_WANR;
955 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
985 } else { 956 } else {
986 switch (ksp->dtype) { 957 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
987 case KS8695_DTYPE_HPNA: 958
988 /* BUG: dtype_hpna implies no phy registers */ 959 /* disable auto-negotiation */
989 /* 960 ctrl |= WMC_WAND;
990 ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC); 961 ctrl &= ~(WMC_WANF100 | WMC_WANFF);
991 962
992 ctrl &= ~(HMC_HSS | HMC_HDS); 963 if (cmd->speed == SPEED_100)
993 if (cmd->speed == SPEED_100) 964 ctrl |= WMC_WANF100;
994 ctrl |= HMC_HSS; 965 if (cmd->duplex == DUPLEX_FULL)
995 if (cmd->duplex == DUPLEX_FULL) 966 ctrl |= WMC_WANFF;
996 ctrl |= HMC_HDS; 967
997 968 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
998 __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
999 */
1000 return -EOPNOTSUPP;
1001 case KS8695_DTYPE_WAN:
1002 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1003
1004 /* disable auto-negotiation */
1005 ctrl |= WMC_WAND;
1006 ctrl &= ~(WMC_WANF100 | WMC_WANFF);
1007
1008 if (cmd->speed == SPEED_100)
1009 ctrl |= WMC_WANF100;
1010 if (cmd->duplex == DUPLEX_FULL)
1011 ctrl |= WMC_WANFF;
1012
1013 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
1014 break;
1015 case KS8695_DTYPE_LAN:
1016 return -EOPNOTSUPP;
1017 }
1018 } 969 }
1019 970
1020 return 0; 971 return 0;
1021} 972}
1022 973
1023/** 974/**
1024 * ks8695_nwayreset - Restart the autonegotiation on the port. 975 * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
1025 * @ndev: The network device to restart autoneotiation on 976 * @ndev: The network device to restart autoneotiation on
1026 */ 977 */
1027static int 978static int
1028ks8695_nwayreset(struct net_device *ndev) 979ks8695_wan_nwayreset(struct net_device *ndev)
1029{ 980{
1030 struct ks8695_priv *ksp = netdev_priv(ndev); 981 struct ks8695_priv *ksp = netdev_priv(ndev);
1031 u32 ctrl; 982 u32 ctrl;
1032 983
1033 switch (ksp->dtype) { 984 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1034 case KS8695_DTYPE_HPNA:
1035 /* No phy means no autonegotiation on hpna */
1036 return -EINVAL;
1037 case KS8695_DTYPE_WAN:
1038 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1039
1040 if ((ctrl & WMC_WAND) == 0)
1041 writel(ctrl | WMC_WANR,
1042 ksp->phyiface_regs + KS8695_WMC);
1043 else
1044 /* auto-negotiation not enabled */
1045 return -EINVAL;
1046 break;
1047 case KS8695_DTYPE_LAN:
1048 return -EOPNOTSUPP;
1049 }
1050
1051 return 0;
1052}
1053 985
1054/** 986 if ((ctrl & WMC_WAND) == 0)
1055 * ks8695_get_link - Retrieve link status of network interface 987 writel(ctrl | WMC_WANR,
1056 * @ndev: The network interface to retrive the link status of. 988 ksp->phyiface_regs + KS8695_WMC);
1057 */ 989 else
1058static u32 990 /* auto-negotiation not enabled */
1059ks8695_get_link(struct net_device *ndev) 991 return -EINVAL;
1060{
1061 struct ks8695_priv *ksp = netdev_priv(ndev);
1062 u32 ctrl;
1063 992
1064 switch (ksp->dtype) {
1065 case KS8695_DTYPE_HPNA:
1066 /* HPNA always has link */
1067 return 1;
1068 case KS8695_DTYPE_WAN:
1069 /* WAN we can read the PHY for */
1070 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1071 return ctrl & WMC_WLS;
1072 case KS8695_DTYPE_LAN:
1073 return -EOPNOTSUPP;
1074 }
1075 return 0; 993 return 0;
1076} 994}
1077 995
1078/** 996/**
1079 * ks8695_get_pause - Retrieve network pause/flow-control advertising 997 * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
1080 * @ndev: The device to retrieve settings from 998 * @ndev: The device to retrieve settings from
1081 * @param: The structure to fill out with the information 999 * @param: The structure to fill out with the information
1082 */ 1000 */
1083static void 1001static void
1084ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) 1002ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1085{ 1003{
1086 struct ks8695_priv *ksp = netdev_priv(ndev); 1004 struct ks8695_priv *ksp = netdev_priv(ndev);
1087 u32 ctrl; 1005 u32 ctrl;
1088 1006
1089 switch (ksp->dtype) { 1007 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1090 case KS8695_DTYPE_HPNA:
1091 /* No phy link on hpna to configure */
1092 return;
1093 case KS8695_DTYPE_WAN:
1094 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1095
1096 /* advertise Pause */
1097 param->autoneg = (ctrl & WMC_WANAP);
1098 1008
1099 /* current Rx Flow-control */ 1009 /* advertise Pause */
1100 ctrl = ks8695_readreg(ksp, KS8695_DRXC); 1010 param->autoneg = (ctrl & WMC_WANAP);
1101 param->rx_pause = (ctrl & DRXC_RFCE);
1102 1011
1103 /* current Tx Flow-control */ 1012 /* current Rx Flow-control */
1104 ctrl = ks8695_readreg(ksp, KS8695_DTXC); 1013 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1105 param->tx_pause = (ctrl & DTXC_TFCE); 1014 param->rx_pause = (ctrl & DRXC_RFCE);
1106 break;
1107 case KS8695_DTYPE_LAN:
1108 /* The LAN's "phy" is a direct-attached switch */
1109 return;
1110 }
1111}
1112 1015
1113/** 1016 /* current Tx Flow-control */
1114 * ks8695_set_pause - Configure pause/flow-control 1017 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
1115 * @ndev: The device to configure 1018 param->tx_pause = (ctrl & DTXC_TFCE);
1116 * @param: The pause parameters to set
1117 *
1118 * TODO: Implement this
1119 */
1120static int
1121ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1122{
1123 return -EOPNOTSUPP;
1124} 1019}
1125 1020
1126/** 1021/**
@@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
1140static const struct ethtool_ops ks8695_ethtool_ops = { 1035static const struct ethtool_ops ks8695_ethtool_ops = {
1141 .get_msglevel = ks8695_get_msglevel, 1036 .get_msglevel = ks8695_get_msglevel,
1142 .set_msglevel = ks8695_set_msglevel, 1037 .set_msglevel = ks8695_set_msglevel,
1143 .get_settings = ks8695_get_settings, 1038 .get_drvinfo = ks8695_get_drvinfo,
1144 .set_settings = ks8695_set_settings, 1039};
1145 .nway_reset = ks8695_nwayreset, 1040
1146 .get_link = ks8695_get_link, 1041static const struct ethtool_ops ks8695_wan_ethtool_ops = {
1147 .get_pauseparam = ks8695_get_pause, 1042 .get_msglevel = ks8695_get_msglevel,
1148 .set_pauseparam = ks8695_set_pause, 1043 .set_msglevel = ks8695_set_msglevel,
1044 .get_settings = ks8695_wan_get_settings,
1045 .set_settings = ks8695_wan_set_settings,
1046 .nway_reset = ks8695_wan_nwayreset,
1047 .get_link = ethtool_op_get_link,
1048 .get_pauseparam = ks8695_wan_get_pause,
1149 .get_drvinfo = ks8695_get_drvinfo, 1049 .get_drvinfo = ks8695_get_drvinfo,
1150}; 1050};
1151 1051
@@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev)
1541 1441
1542 /* driver system setup */ 1442 /* driver system setup */
1543 ndev->netdev_ops = &ks8695_netdev_ops; 1443 ndev->netdev_ops = &ks8695_netdev_ops;
1544 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1545 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 1444 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1546 1445
1547 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); 1446 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
@@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev)
1608 if (ksp->phyiface_regs && ksp->link_irq == -1) { 1507 if (ksp->phyiface_regs && ksp->link_irq == -1) {
1609 ks8695_init_switch(ksp); 1508 ks8695_init_switch(ksp);
1610 ksp->dtype = KS8695_DTYPE_LAN; 1509 ksp->dtype = KS8695_DTYPE_LAN;
1510 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1611 } else if (ksp->phyiface_regs && ksp->link_irq != -1) { 1511 } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
1612 ks8695_init_wan_phy(ksp); 1512 ks8695_init_wan_phy(ksp);
1613 ksp->dtype = KS8695_DTYPE_WAN; 1513 ksp->dtype = KS8695_DTYPE_WAN;
1514 SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
1614 } else { 1515 } else {
1615 /* No initialisation since HPNA does not have a PHY */ 1516 /* No initialisation since HPNA does not have a PHY */
1616 ksp->dtype = KS8695_DTYPE_HPNA; 1517 ksp->dtype = KS8695_DTYPE_HPNA;
1518 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1617 } 1519 }
1618 1520
1619 /* And bring up the net_device with the net core */ 1521 /* And bring up the net_device with the net core */
@@ -1742,7 +1644,7 @@ ks8695_cleanup(void)
1742module_init(ks8695_init); 1644module_init(ks8695_init);
1743module_exit(ks8695_cleanup); 1645module_exit(ks8695_cleanup);
1744 1646
1745MODULE_AUTHOR("Simtec Electronics") 1647MODULE_AUTHOR("Simtec Electronics");
1746MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); 1648MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
1747MODULE_LICENSE("GPL"); 1649MODULE_LICENSE("GPL");
1748MODULE_ALIAS("platform:" MODULENAME); 1650MODULE_ALIAS("platform:" MODULENAME);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf20eb5..3824382faecc 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
48 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, 48 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
49 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, 49 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
50 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, 50 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
51 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
51 /* required last entry */ 52 /* required last entry */
52 { 0 } 53 { 0 }
53}; 54};
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811faf72c..a179cc6d79f2 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1786 spin_lock_bh(&adapter->mcc_lock); 1786 spin_lock_bh(&adapter->mcc_lock);
1787 1787
1788 wrb = wrb_from_mccq(adapter); 1788 wrb = wrb_from_mccq(adapter);
1789 if (!wrb) {
1790 status = -EBUSY;
1791 goto err;
1792 }
1789 req = nonemb_cmd->va; 1793 req = nonemb_cmd->va;
1790 sge = nonembedded_sgl(wrb); 1794 sge = nonembedded_sgl(wrb);
1791 1795
@@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1801 1805
1802 status = be_mcc_notify_wait(adapter); 1806 status = be_mcc_notify_wait(adapter);
1803 1807
1808err:
1804 spin_unlock_bh(&adapter->mcc_lock); 1809 spin_unlock_bh(&adapter->mcc_lock);
1805 return status; 1810 return status;
1806} 1811}
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b7152f..28a32a6c8bf1 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -312,11 +312,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
312 if (adapter->link_up != link_up) { 312 if (adapter->link_up != link_up) {
313 adapter->link_speed = -1; 313 adapter->link_speed = -1;
314 if (link_up) { 314 if (link_up) {
315 netif_start_queue(netdev);
316 netif_carrier_on(netdev); 315 netif_carrier_on(netdev);
317 printk(KERN_INFO "%s: Link up\n", netdev->name); 316 printk(KERN_INFO "%s: Link up\n", netdev->name);
318 } else { 317 } else {
319 netif_stop_queue(netdev);
320 netif_carrier_off(netdev); 318 netif_carrier_off(netdev);
321 printk(KERN_INFO "%s: Link down\n", netdev->name); 319 printk(KERN_INFO "%s: Link down\n", netdev->name);
322 } 320 }
@@ -2628,8 +2626,6 @@ static void be_netdev_init(struct net_device *netdev)
2628 2626
2629 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, 2627 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2630 BE_NAPI_WEIGHT); 2628 BE_NAPI_WEIGHT);
2631
2632 netif_stop_queue(netdev);
2633} 2629}
2634 2630
2635static void be_unmap_pci_bars(struct be_adapter *adapter) 2631static void be_unmap_pci_bars(struct be_adapter *adapter)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 0b9fc5173aef..22abfb39d813 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1284,19 +1284,12 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
1284{ 1284{
1285 u32 emac_hashhi, emac_hashlo; 1285 u32 emac_hashhi, emac_hashlo;
1286 struct netdev_hw_addr *ha; 1286 struct netdev_hw_addr *ha;
1287 char *addrs;
1288 u32 crc; 1287 u32 crc;
1289 1288
1290 emac_hashhi = emac_hashlo = 0; 1289 emac_hashhi = emac_hashlo = 0;
1291 1290
1292 netdev_for_each_mc_addr(ha, dev) { 1291 netdev_for_each_mc_addr(ha, dev) {
1293 addrs = ha->addr; 1292 crc = ether_crc(ETH_ALEN, ha->addr);
1294
1295 /* skip non-multicast addresses */
1296 if (!(*addrs & 1))
1297 continue;
1298
1299 crc = ether_crc(ETH_ALEN, addrs);
1300 crc >>= 26; 1293 crc >>= 26;
1301 1294
1302 if (crc & 0x20) 1295 if (crc & 0x20)
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 99be5ae91991..142d6047da27 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
275 275
276 ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); 276 ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
277 if (ioc_attr) { 277 if (ioc_attr) {
278 memset(ioc_attr, 0, sizeof(*ioc_attr));
279 spin_lock_irqsave(&bnad->bna_lock, flags); 278 spin_lock_irqsave(&bnad->bna_lock, flags);
280 bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); 279 bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
281 spin_unlock_irqrestore(&bnad->bna_lock, flags); 280 spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf3464a..0ba59d5aeb7f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -7553,6 +7553,10 @@ bnx2_set_flags(struct net_device *dev, u32 data)
7553 !(data & ETH_FLAG_RXVLAN)) 7553 !(data & ETH_FLAG_RXVLAN))
7554 return -EINVAL; 7554 return -EINVAL;
7555 7555
7556 /* TSO with VLAN tag won't work with current firmware */
7557 if (!(data & ETH_FLAG_TXVLAN))
7558 return -EINVAL;
7559
7556 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN | 7560 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
7557 ETH_FLAG_TXVLAN); 7561 ETH_FLAG_TXVLAN);
7558 if (rc) 7562 if (rc)
@@ -7962,11 +7966,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7962 7966
7963 /* AER (Advanced Error Reporting) hooks */ 7967 /* AER (Advanced Error Reporting) hooks */
7964 err = pci_enable_pcie_error_reporting(pdev); 7968 err = pci_enable_pcie_error_reporting(pdev);
7965 if (err) { 7969 if (!err)
7966 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting " 7970 bp->flags |= BNX2_FLAG_AER_ENABLED;
7967 "failed 0x%x\n", err);
7968 /* non-fatal, continue */
7969 }
7970 7971
7971 } else { 7972 } else {
7972 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); 7973 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8230,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8229 return 0; 8230 return 0;
8230 8231
8231err_out_unmap: 8232err_out_unmap:
8232 if (bp->flags & BNX2_FLAG_PCIE) 8233 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8233 pci_disable_pcie_error_reporting(pdev); 8234 pci_disable_pcie_error_reporting(pdev);
8235 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8236 }
8234 8237
8235 if (bp->regview) { 8238 if (bp->regview) {
8236 iounmap(bp->regview); 8239 iounmap(bp->regview);
@@ -8418,8 +8421,10 @@ bnx2_remove_one(struct pci_dev *pdev)
8418 8421
8419 kfree(bp->temp_stats_blk); 8422 kfree(bp->temp_stats_blk);
8420 8423
8421 if (bp->flags & BNX2_FLAG_PCIE) 8424 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8422 pci_disable_pcie_error_reporting(pdev); 8425 pci_disable_pcie_error_reporting(pdev);
8426 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8427 }
8423 8428
8424 free_netdev(dev); 8429 free_netdev(dev);
8425 8430
@@ -8535,7 +8540,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8535 } 8540 }
8536 rtnl_unlock(); 8541 rtnl_unlock();
8537 8542
8538 if (!(bp->flags & BNX2_FLAG_PCIE)) 8543 if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8539 return result; 8544 return result;
8540 8545
8541 err = pci_cleanup_aer_uncorrect_error_status(pdev); 8546 err = pci_cleanup_aer_uncorrect_error_status(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e82fe9..f459fb2f9add 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6741,6 +6741,7 @@ struct bnx2 {
6741#define BNX2_FLAG_JUMBO_BROKEN 0x00000800 6741#define BNX2_FLAG_JUMBO_BROKEN 0x00000800
6742#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 6742#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000
6743#define BNX2_FLAG_BROKEN_STATS 0x00002000 6743#define BNX2_FLAG_BROKEN_STATS 0x00002000
6744#define BNX2_FLAG_AER_ENABLED 0x00004000
6744 6745
6745 struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; 6746 struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
6746 6747
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index a6cd335c9436..653c62475cb6 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
22 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
23/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
24 24
25#define DRV_MODULE_VERSION "1.62.00-3" 25#define DRV_MODULE_VERSION "1.62.00-5"
26#define DRV_MODULE_RELDATE "2010/12/21" 26#define DRV_MODULE_RELDATE "2011/01/30"
27#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
28 28
29#define BNX2X_MULTI_QUEUE 29#define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f63989..548f5631c0dc 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -352,6 +352,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
352#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 352#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
353 /* forced only */ 353 /* forced only */
354#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 354#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
355 /* Indicate whether to swap the external phy polarity */
356#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
357#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
358#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
355 359
356 u32 external_phy_config; 360 u32 external_phy_config;
357#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 361#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de24f391..dd1210fddfff 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1573,7 +1573,7 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
1573 1573
1574 offset = phy->addr + ser_lane; 1574 offset = phy->addr + ser_lane;
1575 if (CHIP_IS_E2(bp)) 1575 if (CHIP_IS_E2(bp))
1576 aer_val = 0x2800 + offset - 1; 1576 aer_val = 0x3800 + offset - 1;
1577 else 1577 else
1578 aer_val = 0x3800 + offset; 1578 aer_val = 0x3800 + offset;
1579 CL45_WR_OVER_CL22(bp, phy, 1579 CL45_WR_OVER_CL22(bp, phy,
@@ -3166,7 +3166,23 @@ u8 bnx2x_set_led(struct link_params *params,
3166 if (!vars->link_up) 3166 if (!vars->link_up)
3167 break; 3167 break;
3168 case LED_MODE_ON: 3168 case LED_MODE_ON:
3169 if (SINGLE_MEDIA_DIRECT(params)) { 3169 if (params->phy[EXT_PHY1].type ==
3170 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
3171 CHIP_IS_E2(bp) && params->num_phys == 2) {
3172 /**
3173 * This is a work-around for E2+8727 Configurations
3174 */
3175 if (mode == LED_MODE_ON ||
3176 speed == SPEED_10000){
3177 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
3178 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
3179
3180 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3181 EMAC_WR(bp, EMAC_REG_EMAC_LED,
3182 (tmp | EMAC_LED_OVERRIDE));
3183 return rc;
3184 }
3185 } else if (SINGLE_MEDIA_DIRECT(params)) {
3170 /** 3186 /**
3171 * This is a work-around for HW issue found when link 3187 * This is a work-around for HW issue found when link
3172 * is up in CL73 3188 * is up in CL73
@@ -3854,11 +3870,14 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
3854 pause_result); 3870 pause_result);
3855 } 3871 }
3856} 3872}
3857 3873static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3858static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3859 struct bnx2x_phy *phy, 3874 struct bnx2x_phy *phy,
3860 u8 port) 3875 u8 port)
3861{ 3876{
3877 u32 count = 0;
3878 u16 fw_ver1, fw_msgout;
3879 u8 rc = 0;
3880
3862 /* Boot port from external ROM */ 3881 /* Boot port from external ROM */
3863 /* EDC grst */ 3882 /* EDC grst */
3864 bnx2x_cl45_write(bp, phy, 3883 bnx2x_cl45_write(bp, phy,
@@ -3888,56 +3907,45 @@ static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3888 MDIO_PMA_REG_GEN_CTRL, 3907 MDIO_PMA_REG_GEN_CTRL,
3889 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 3908 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
3890 3909
3891 /* wait for 120ms for code download via SPI port */ 3910 /* Delay 100ms per the PHY specifications */
3892 msleep(120); 3911 msleep(100);
3912
3913 /* 8073 sometimes taking longer to download */
3914 do {
3915 count++;
3916 if (count > 300) {
3917 DP(NETIF_MSG_LINK,
3918 "bnx2x_8073_8727_external_rom_boot port %x:"
3919 "Download failed. fw version = 0x%x\n",
3920 port, fw_ver1);
3921 rc = -EINVAL;
3922 break;
3923 }
3924
3925 bnx2x_cl45_read(bp, phy,
3926 MDIO_PMA_DEVAD,
3927 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
3928 bnx2x_cl45_read(bp, phy,
3929 MDIO_PMA_DEVAD,
3930 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
3931
3932 msleep(1);
3933 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
3934 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
3935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
3893 3936
3894 /* Clear ser_boot_ctl bit */ 3937 /* Clear ser_boot_ctl bit */
3895 bnx2x_cl45_write(bp, phy, 3938 bnx2x_cl45_write(bp, phy,
3896 MDIO_PMA_DEVAD, 3939 MDIO_PMA_DEVAD,
3897 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 3940 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
3898 bnx2x_save_bcm_spirom_ver(bp, phy, port); 3941 bnx2x_save_bcm_spirom_ver(bp, phy, port);
3899}
3900 3942
3901static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp, 3943 DP(NETIF_MSG_LINK,
3902 struct bnx2x_phy *phy) 3944 "bnx2x_8073_8727_external_rom_boot port %x:"
3903{ 3945 "Download complete. fw version = 0x%x\n",
3904 u16 val; 3946 port, fw_ver1);
3905 bnx2x_cl45_read(bp, phy,
3906 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
3907
3908 if (val == 0) {
3909 /* Mustn't set low power mode in 8073 A0 */
3910 return;
3911 }
3912
3913 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3914 bnx2x_cl45_read(bp, phy,
3915 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3916 val &= ~(1<<13);
3917 bnx2x_cl45_write(bp, phy,
3918 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3919
3920 /* PLL controls */
3921 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
3922 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
3923 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
3924 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
3925 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
3926
3927 /* Tx Controls */
3928 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3929 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
3930 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
3931
3932 /* Rx Controls */
3933 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3934 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
3935 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
3936 3947
3937 /* Enable PLL sequencer (use read-modify-write to set bit 13) */ 3948 return rc;
3938 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3939 val |= (1<<13);
3940 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3941} 3949}
3942 3950
3943/******************************************************************/ 3951/******************************************************************/
@@ -4098,8 +4106,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4098 4106
4099 bnx2x_8073_set_pause_cl37(params, phy, vars); 4107 bnx2x_8073_set_pause_cl37(params, phy, vars);
4100 4108
4101 bnx2x_8073_set_xaui_low_power_mode(bp, phy);
4102
4103 bnx2x_cl45_read(bp, phy, 4109 bnx2x_cl45_read(bp, phy,
4104 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); 4110 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
4105 4111
@@ -4108,6 +4114,25 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4108 4114
4109 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 4115 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
4110 4116
4117 /**
4118 * If this is forced speed, set to KR or KX (all other are not
4119 * supported)
4120 */
4121 /* Swap polarity if required - Must be done only in non-1G mode */
4122 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4123 /* Configure the 8073 to swap _P and _N of the KR lines */
4124 DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
4125 /* 10G Rx/Tx and 1G Tx signal polarity swap */
4126 bnx2x_cl45_read(bp, phy,
4127 MDIO_PMA_DEVAD,
4128 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
4129 bnx2x_cl45_write(bp, phy,
4130 MDIO_PMA_DEVAD,
4131 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
4132 (val | (3<<9)));
4133 }
4134
4135
4111 /* Enable CL37 BAM */ 4136 /* Enable CL37 BAM */
4112 if (REG_RD(bp, params->shmem_base + 4137 if (REG_RD(bp, params->shmem_base +
4113 offsetof(struct shmem_region, dev_info. 4138 offsetof(struct shmem_region, dev_info.
@@ -4314,8 +4339,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4314 } 4339 }
4315 4340
4316 if (link_up) { 4341 if (link_up) {
4342 /* Swap polarity if required */
4343 if (params->lane_config &
4344 PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4345 /* Configure the 8073 to swap P and N of the KR lines */
4346 bnx2x_cl45_read(bp, phy,
4347 MDIO_XS_DEVAD,
4348 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
4349 /**
4350 * Set bit 3 to invert Rx in 1G mode and clear this bit
4351 * when it`s in 10G mode.
4352 */
4353 if (vars->line_speed == SPEED_1000) {
4354 DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
4355 "the 8073\n");
4356 val1 |= (1<<3);
4357 } else
4358 val1 &= ~(1<<3);
4359
4360 bnx2x_cl45_write(bp, phy,
4361 MDIO_XS_DEVAD,
4362 MDIO_XS_REG_8073_RX_CTRL_PCIE,
4363 val1);
4364 }
4317 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 4365 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
4318 bnx2x_8073_resolve_fc(phy, params, vars); 4366 bnx2x_8073_resolve_fc(phy, params, vars);
4367 vars->duplex = DUPLEX_FULL;
4319 } 4368 }
4320 return link_up; 4369 return link_up;
4321} 4370}
@@ -5062,6 +5111,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
5062 else 5111 else
5063 vars->line_speed = SPEED_10000; 5112 vars->line_speed = SPEED_10000;
5064 bnx2x_ext_phy_resolve_fc(phy, params, vars); 5113 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5114 vars->duplex = DUPLEX_FULL;
5065 } 5115 }
5066 return link_up; 5116 return link_up;
5067} 5117}
@@ -5758,8 +5808,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5758 DP(NETIF_MSG_LINK, "port %x: External link is down\n", 5808 DP(NETIF_MSG_LINK, "port %x: External link is down\n",
5759 params->port); 5809 params->port);
5760 } 5810 }
5761 if (link_up) 5811 if (link_up) {
5762 bnx2x_ext_phy_resolve_fc(phy, params, vars); 5812 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5813 vars->duplex = DUPLEX_FULL;
5814 DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
5815 }
5763 5816
5764 if ((DUAL_MEDIA(params)) && 5817 if ((DUAL_MEDIA(params)) &&
5765 (phy->req_line_speed == SPEED_1000)) { 5818 (phy->req_line_speed == SPEED_1000)) {
@@ -5875,10 +5928,26 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
5875 MDIO_PMA_REG_8481_LED2_MASK, 5928 MDIO_PMA_REG_8481_LED2_MASK,
5876 0x18); 5929 0x18);
5877 5930
5931 /* Select activity source by Tx and Rx, as suggested by PHY AE */
5878 bnx2x_cl45_write(bp, phy, 5932 bnx2x_cl45_write(bp, phy,
5879 MDIO_PMA_DEVAD, 5933 MDIO_PMA_DEVAD,
5880 MDIO_PMA_REG_8481_LED3_MASK, 5934 MDIO_PMA_REG_8481_LED3_MASK,
5881 0x0040); 5935 0x0006);
5936
5937 /* Select the closest activity blink rate to that in 10/100/1000 */
5938 bnx2x_cl45_write(bp, phy,
5939 MDIO_PMA_DEVAD,
5940 MDIO_PMA_REG_8481_LED3_BLINK,
5941 0);
5942
5943 bnx2x_cl45_read(bp, phy,
5944 MDIO_PMA_DEVAD,
5945 MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
5946 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
5947
5948 bnx2x_cl45_write(bp, phy,
5949 MDIO_PMA_DEVAD,
5950 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
5882 5951
5883 /* 'Interrupt Mask' */ 5952 /* 'Interrupt Mask' */
5884 bnx2x_cl45_write(bp, phy, 5953 bnx2x_cl45_write(bp, phy,
@@ -6126,6 +6195,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
6126 /* Check link 10G */ 6195 /* Check link 10G */
6127 if (val2 & (1<<11)) { 6196 if (val2 & (1<<11)) {
6128 vars->line_speed = SPEED_10000; 6197 vars->line_speed = SPEED_10000;
6198 vars->duplex = DUPLEX_FULL;
6129 link_up = 1; 6199 link_up = 1;
6130 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 6200 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
6131 } else { /* Check Legacy speed link */ 6201 } else { /* Check Legacy speed link */
@@ -6405,6 +6475,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6405 MDIO_PMA_DEVAD, 6475 MDIO_PMA_DEVAD,
6406 MDIO_PMA_REG_8481_LED1_MASK, 6476 MDIO_PMA_REG_8481_LED1_MASK,
6407 0x80); 6477 0x80);
6478
6479 /* Tell LED3 to blink on source */
6480 bnx2x_cl45_read(bp, phy,
6481 MDIO_PMA_DEVAD,
6482 MDIO_PMA_REG_8481_LINK_SIGNAL,
6483 &val);
6484 val &= ~(7<<6);
6485 val |= (1<<6); /* A83B[8:6]= 1 */
6486 bnx2x_cl45_write(bp, phy,
6487 MDIO_PMA_DEVAD,
6488 MDIO_PMA_REG_8481_LINK_SIGNAL,
6489 val);
6408 } 6490 }
6409 break; 6491 break;
6410 } 6492 }
@@ -6489,6 +6571,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
6489 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, 6571 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
6490 &val2); 6572 &val2);
6491 vars->line_speed = SPEED_10000; 6573 vars->line_speed = SPEED_10000;
6574 vars->duplex = DUPLEX_FULL;
6492 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", 6575 DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
6493 val2, (val2 & (1<<14))); 6576 val2, (val2 & (1<<14)));
6494 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); 6577 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -7605,10 +7688,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7605 struct bnx2x_phy phy[PORT_MAX]; 7688 struct bnx2x_phy phy[PORT_MAX];
7606 struct bnx2x_phy *phy_blk[PORT_MAX]; 7689 struct bnx2x_phy *phy_blk[PORT_MAX];
7607 u16 val; 7690 u16 val;
7608 s8 port; 7691 s8 port = 0;
7609 s8 port_of_path = 0; 7692 s8 port_of_path = 0;
7610 7693 u32 swap_val, swap_override;
7611 bnx2x_ext_phy_hw_reset(bp, 0); 7694 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7695 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7696 port ^= (swap_val && swap_override);
7697 bnx2x_ext_phy_hw_reset(bp, port);
7612 /* PART1 - Reset both phys */ 7698 /* PART1 - Reset both phys */
7613 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7699 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7614 u32 shmem_base, shmem2_base; 7700 u32 shmem_base, shmem2_base;
@@ -7663,7 +7749,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7663 7749
7664 /* PART2 - Download firmware to both phys */ 7750 /* PART2 - Download firmware to both phys */
7665 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7751 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7666 u16 fw_ver1;
7667 if (CHIP_IS_E2(bp)) 7752 if (CHIP_IS_E2(bp))
7668 port_of_path = 0; 7753 port_of_path = 0;
7669 else 7754 else
@@ -7671,19 +7756,9 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7671 7756
7672 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", 7757 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7673 phy_blk[port]->addr); 7758 phy_blk[port]->addr);
7674 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7759 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7675 port_of_path); 7760 port_of_path))
7676
7677 bnx2x_cl45_read(bp, phy_blk[port],
7678 MDIO_PMA_DEVAD,
7679 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
7680 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
7681 DP(NETIF_MSG_LINK,
7682 "bnx2x_8073_common_init_phy port %x:"
7683 "Download failed. fw version = 0x%x\n",
7684 port, fw_ver1);
7685 return -EINVAL; 7761 return -EINVAL;
7686 }
7687 7762
7688 /* Only set bit 10 = 1 (Tx power down) */ 7763 /* Only set bit 10 = 1 (Tx power down) */
7689 bnx2x_cl45_read(bp, phy_blk[port], 7764 bnx2x_cl45_read(bp, phy_blk[port],
@@ -7848,27 +7923,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7848 } 7923 }
7849 /* PART2 - Download firmware to both phys */ 7924 /* PART2 - Download firmware to both phys */
7850 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7925 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7851 u16 fw_ver1;
7852 if (CHIP_IS_E2(bp)) 7926 if (CHIP_IS_E2(bp))
7853 port_of_path = 0; 7927 port_of_path = 0;
7854 else 7928 else
7855 port_of_path = port; 7929 port_of_path = port;
7856 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", 7930 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7857 phy_blk[port]->addr); 7931 phy_blk[port]->addr);
7858 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7932 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7859 port_of_path); 7933 port_of_path))
7860 bnx2x_cl45_read(bp, phy_blk[port],
7861 MDIO_PMA_DEVAD,
7862 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
7863 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
7864 DP(NETIF_MSG_LINK,
7865 "bnx2x_8727_common_init_phy port %x:"
7866 "Download failed. fw version = 0x%x\n",
7867 port, fw_ver1);
7868 return -EINVAL; 7934 return -EINVAL;
7869 }
7870 }
7871 7935
7936 }
7872 return 0; 7937 return 0;
7873} 7938}
7874 7939
@@ -7916,6 +7981,7 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7916 u32 shmem2_base_path[], u32 chip_id) 7981 u32 shmem2_base_path[], u32 chip_id)
7917{ 7982{
7918 u8 rc = 0; 7983 u8 rc = 0;
7984 u32 phy_ver;
7919 u8 phy_index; 7985 u8 phy_index;
7920 u32 ext_phy_type, ext_phy_config; 7986 u32 ext_phy_type, ext_phy_config;
7921 DP(NETIF_MSG_LINK, "Begin common phy init\n"); 7987 DP(NETIF_MSG_LINK, "Begin common phy init\n");
@@ -7923,6 +7989,16 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7923 if (CHIP_REV_IS_EMUL(bp)) 7989 if (CHIP_REV_IS_EMUL(bp))
7924 return 0; 7990 return 0;
7925 7991
7992 /* Check if common init was already done */
7993 phy_ver = REG_RD(bp, shmem_base_path[0] +
7994 offsetof(struct shmem_region,
7995 port_mb[PORT_0].ext_phy_fw_version));
7996 if (phy_ver) {
7997 DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
7998 phy_ver);
7999 return 0;
8000 }
8001
7926 /* Read the ext_phy_type for arbitrary port(0) */ 8002 /* Read the ext_phy_type for arbitrary port(0) */
7927 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; 8003 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
7928 phy_index++) { 8004 phy_index++) {
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 8cdcf5b39d1e..d584d32c747d 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -2301,15 +2301,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2301 /* accept matched ucast */ 2301 /* accept matched ucast */
2302 drop_all_ucast = 0; 2302 drop_all_ucast = 0;
2303 } 2303 }
2304 if (filters & BNX2X_ACCEPT_MULTICAST) { 2304 if (filters & BNX2X_ACCEPT_MULTICAST)
2305 /* accept matched mcast */ 2305 /* accept matched mcast */
2306 drop_all_mcast = 0; 2306 drop_all_mcast = 0;
2307 if (IS_MF_SI(bp)) 2307
2308 /* since mcast addresses won't arrive with ovlan,
2309 * fw needs to accept all of them in
2310 * switch-independent mode */
2311 accp_all_mcast = 1;
2312 }
2313 if (filters & BNX2X_ACCEPT_ALL_UNICAST) { 2308 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2314 /* accept all mcast */ 2309 /* accept all mcast */
2315 drop_all_ucast = 0; 2310 drop_all_ucast = 0;
@@ -4281,9 +4276,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4281 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | 4276 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4282 BNX2X_ACCEPT_MULTICAST; 4277 BNX2X_ACCEPT_MULTICAST;
4283#ifdef BCM_CNIC 4278#ifdef BCM_CNIC
4284 cl_id = bnx2x_fcoe(bp, cl_id); 4279 if (!NO_FCOE(bp)) {
4285 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4280 cl_id = bnx2x_fcoe(bp, cl_id);
4286 BNX2X_ACCEPT_MULTICAST); 4281 bnx2x_rxq_set_mac_filters(bp, cl_id,
4282 BNX2X_ACCEPT_UNICAST |
4283 BNX2X_ACCEPT_MULTICAST);
4284 }
4287#endif 4285#endif
4288 break; 4286 break;
4289 4287
@@ -4291,18 +4289,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4291 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | 4289 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4292 BNX2X_ACCEPT_ALL_MULTICAST; 4290 BNX2X_ACCEPT_ALL_MULTICAST;
4293#ifdef BCM_CNIC 4291#ifdef BCM_CNIC
4294 cl_id = bnx2x_fcoe(bp, cl_id); 4292 /*
4295 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4293 * Prevent duplication of multicast packets by configuring FCoE
4296 BNX2X_ACCEPT_MULTICAST); 4294 * L2 Client to receive only matched unicast frames.
4295 */
4296 if (!NO_FCOE(bp)) {
4297 cl_id = bnx2x_fcoe(bp, cl_id);
4298 bnx2x_rxq_set_mac_filters(bp, cl_id,
4299 BNX2X_ACCEPT_UNICAST);
4300 }
4297#endif 4301#endif
4298 break; 4302 break;
4299 4303
4300 case BNX2X_RX_MODE_PROMISC: 4304 case BNX2X_RX_MODE_PROMISC:
4301 def_q_filters |= BNX2X_PROMISCUOUS_MODE; 4305 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4302#ifdef BCM_CNIC 4306#ifdef BCM_CNIC
4303 cl_id = bnx2x_fcoe(bp, cl_id); 4307 /*
4304 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4308 * Prevent packets duplication by configuring DROP_ALL for FCoE
4305 BNX2X_ACCEPT_MULTICAST); 4309 * L2 Client.
4310 */
4311 if (!NO_FCOE(bp)) {
4312 cl_id = bnx2x_fcoe(bp, cl_id);
4313 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4314 }
4306#endif 4315#endif
4307 /* pass management unicast packets as well */ 4316 /* pass management unicast packets as well */
4308 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; 4317 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -5296,10 +5305,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5296 } 5305 }
5297 } 5306 }
5298 5307
5299 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5300 bp->common.shmem_base,
5301 bp->common.shmem2_base);
5302
5303 bnx2x_setup_fan_failure_detection(bp); 5308 bnx2x_setup_fan_failure_detection(bp);
5304 5309
5305 /* clear PXP2 attentions */ 5310 /* clear PXP2 attentions */
@@ -5503,9 +5508,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
5503 5508
5504 bnx2x_init_block(bp, MCP_BLOCK, init_stage); 5509 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5505 bnx2x_init_block(bp, DMAE_BLOCK, init_stage); 5510 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5506 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5507 bp->common.shmem_base,
5508 bp->common.shmem2_base);
5509 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, 5511 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5510 bp->common.shmem2_base, port)) { 5512 bp->common.shmem2_base, port)) {
5511 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5513 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -8379,6 +8381,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8379 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 8381 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8380 bp->mdio.prtad = 8382 bp->mdio.prtad =
8381 XGXS_EXT_PHY_ADDR(ext_phy_config); 8383 XGXS_EXT_PHY_ADDR(ext_phy_config);
8384
8385 /*
8386 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
8387 * In MF mode, it is set to cover self test cases
8388 */
8389 if (IS_MF(bp))
8390 bp->port.need_hw_lock = 1;
8391 else
8392 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8393 bp->common.shmem_base,
8394 bp->common.shmem2_base);
8382} 8395}
8383 8396
8384static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 8397static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index c939683e3d61..e01330bb36c7 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6194,7 +6194,11 @@ Theotherbitsarereservedandshouldbezero*/
6194#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 6194#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
6195#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 6195#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
6196#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 6196#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
6197#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
6198#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
6197 6199
6200#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
6201#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
6198 6202
6199#define IGU_FUNC_BASE 0x0400 6203#define IGU_FUNC_BASE 0x0400
6200 6204
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 171782e2bb39..1024ae158227 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2470,6 +2470,10 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2470 if (!(dev->flags & IFF_MASTER)) 2470 if (!(dev->flags & IFF_MASTER))
2471 goto out; 2471 goto out;
2472 2472
2473 skb = skb_share_check(skb, GFP_ATOMIC);
2474 if (!skb)
2475 goto out;
2476
2473 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2477 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2474 goto out; 2478 goto out;
2475 2479
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f4e638c65129..5c6fba802f2b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -326,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
326 goto out; 326 goto out;
327 } 327 }
328 328
329 skb = skb_share_check(skb, GFP_ATOMIC);
330 if (!skb)
331 goto out;
332
329 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) 333 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
330 goto out; 334 goto out;
331 335
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1025b85acf1..163e0b06eaa5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2733,6 +2733,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2733 if (!slave || !slave_do_arp_validate(bond, slave)) 2733 if (!slave || !slave_do_arp_validate(bond, slave))
2734 goto out_unlock; 2734 goto out_unlock;
2735 2735
2736 skb = skb_share_check(skb, GFP_ATOMIC);
2737 if (!skb)
2738 goto out_unlock;
2739
2736 if (!pskb_may_pull(skb, arp_hdr_len(dev))) 2740 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
2737 goto out_unlock; 2741 goto out_unlock;
2738 2742
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5a9db60ade9..5dec456fd4a4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@ config CAN_SLCAN
23 23
24 As only the sending and receiving of CAN frames is implemented, this 24 As only the sending and receiving of CAN frames is implemented, this
25 driver should work with the (serial/USB) CAN hardware from: 25 driver should work with the (serial/USB) CAN hardware from:
26 www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de 26 www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
27 27
28 Userspace tools to attach the SLCAN line discipline (slcan_attach, 28 Userspace tools to attach the SLCAN line discipline (slcan_attach,
29 slcand) can be found in the can-utils at the SocketCAN SVN, see 29 slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -117,6 +117,8 @@ source "drivers/net/can/sja1000/Kconfig"
117 117
118source "drivers/net/can/usb/Kconfig" 118source "drivers/net/can/usb/Kconfig"
119 119
120source "drivers/net/can/softing/Kconfig"
121
120config CAN_DEBUG_DEVICES 122config CAN_DEBUG_DEVICES
121 bool "CAN devices debugging messages" 123 bool "CAN devices debugging messages"
122 depends on CAN 124 depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 07ca159ba3f9..53c82a71778e 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CAN_DEV) += can-dev.o
9can-dev-y := dev.o 9can-dev-y := dev.o
10 10
11obj-y += usb/ 11obj-y += usb/
12obj-y += softing/
12 13
13obj-$(CONFIG_CAN_SJA1000) += sja1000/ 14obj-$(CONFIG_CAN_SJA1000) += sja1000/
14obj-$(CONFIG_CAN_MSCAN) += mscan/ 15obj-$(CONFIG_CAN_MSCAN) += mscan/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d06f7ed..57d2ffbbb433 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
2 * at91_can.c - CAN network driver for AT91 SoC CAN controller 2 * at91_can.c - CAN network driver for AT91 SoC CAN controller
3 * 3 *
4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> 4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
5 * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de> 5 * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
6 * 6 *
7 * This software may be distributed under the terms of the GNU General 7 * This software may be distributed under the terms of the GNU General
8 * Public License ("GPL") version 2 as distributed in the 'COPYING' 8 * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/netdevice.h> 31#include <linux/netdevice.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/rtnetlink.h>
33#include <linux/skbuff.h> 34#include <linux/skbuff.h>
34#include <linux/spinlock.h> 35#include <linux/spinlock.h>
35#include <linux/string.h> 36#include <linux/string.h>
@@ -40,22 +41,23 @@
40 41
41#include <mach/board.h> 42#include <mach/board.h>
42 43
43#define AT91_NAPI_WEIGHT 12 44#define AT91_NAPI_WEIGHT 11
44 45
45/* 46/*
46 * RX/TX Mailbox split 47 * RX/TX Mailbox split
47 * don't dare to touch 48 * don't dare to touch
48 */ 49 */
49#define AT91_MB_RX_NUM 12 50#define AT91_MB_RX_NUM 11
50#define AT91_MB_TX_SHIFT 2 51#define AT91_MB_TX_SHIFT 2
51 52
52#define AT91_MB_RX_FIRST 0 53#define AT91_MB_RX_FIRST 1
53#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) 54#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
54 55
55#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) 56#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
56#define AT91_MB_RX_SPLIT 8 57#define AT91_MB_RX_SPLIT 8
57#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) 58#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
58#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT)) 59#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
60 ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
59 61
60#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) 62#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
61#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) 63#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@ struct at91_priv {
168 170
169 struct clk *clk; 171 struct clk *clk;
170 struct at91_can_data *pdata; 172 struct at91_can_data *pdata;
173
174 canid_t mb0_id;
171}; 175};
172 176
173static struct can_bittiming_const at91_bittiming_const = { 177static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
220 set_mb_mode_prio(priv, mb, mode, 0); 224 set_mb_mode_prio(priv, mb, mode, 0);
221} 225}
222 226
227static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
228{
229 u32 reg_mid;
230
231 if (can_id & CAN_EFF_FLAG)
232 reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
233 else
234 reg_mid = (can_id & CAN_SFF_MASK) << 18;
235
236 return reg_mid;
237}
238
223/* 239/*
224 * Swtich transceiver on or off 240 * Swtich transceiver on or off
225 */ 241 */
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev)
233{ 249{
234 struct at91_priv *priv = netdev_priv(dev); 250 struct at91_priv *priv = netdev_priv(dev);
235 unsigned int i; 251 unsigned int i;
252 u32 reg_mid;
236 253
237 /* 254 /*
238 * The first 12 mailboxes are used as a reception FIFO. The 255 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
239 * last mailbox is configured with overwrite option. The 256 * mailbox is disabled. The next 11 mailboxes are used as a
240 * overwrite flag indicates a FIFO overflow. 257 * reception FIFO. The last mailbox is configured with
258 * overwrite option. The overwrite flag indicates a FIFO
259 * overflow.
241 */ 260 */
261 reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
262 for (i = 0; i < AT91_MB_RX_FIRST; i++) {
263 set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
264 at91_write(priv, AT91_MID(i), reg_mid);
265 at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
266 }
267
242 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) 268 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
243 set_mb_mode(priv, i, AT91_MB_MODE_RX); 269 set_mb_mode(priv, i, AT91_MB_MODE_RX);
244 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); 270 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
254 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); 280 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
255 281
256 /* Reset tx and rx helper pointers */ 282 /* Reset tx and rx helper pointers */
257 priv->tx_next = priv->tx_echo = priv->rx_next = 0; 283 priv->tx_next = priv->tx_echo = 0;
284 priv->rx_next = AT91_MB_RX_FIRST;
258} 285}
259 286
260static int at91_set_bittiming(struct net_device *dev) 287static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
372 netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); 399 netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
373 return NETDEV_TX_BUSY; 400 return NETDEV_TX_BUSY;
374 } 401 }
375 402 reg_mid = at91_can_id_to_reg_mid(cf->can_id);
376 if (cf->can_id & CAN_EFF_FLAG)
377 reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
378 else
379 reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
380
381 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | 403 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
382 (cf->can_dlc << 16) | AT91_MCR_MTCR; 404 (cf->can_dlc << 16) | AT91_MCR_MTCR;
383 405
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
539 * 561 *
540 * Theory of Operation: 562 * Theory of Operation:
541 * 563 *
542 * 12 of the 16 mailboxes on the chip are reserved for RX. we split 564 * 11 of the 16 mailboxes on the chip are reserved for RX. we split
543 * them into 2 groups. The lower group holds 8 and upper 4 mailboxes. 565 * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
544 * 566 *
545 * Like it or not, but the chip always saves a received CAN message 567 * Like it or not, but the chip always saves a received CAN message
546 * into the first free mailbox it finds (starting with the 568 * into the first free mailbox it finds (starting with the
547 * lowest). This makes it very difficult to read the messages in the 569 * lowest). This makes it very difficult to read the messages in the
548 * right order from the chip. This is how we work around that problem: 570 * right order from the chip. This is how we work around that problem:
549 * 571 *
550 * The first message goes into mb nr. 0 and issues an interrupt. All 572 * The first message goes into mb nr. 1 and issues an interrupt. All
551 * rx ints are disabled in the interrupt handler and a napi poll is 573 * rx ints are disabled in the interrupt handler and a napi poll is
552 * scheduled. We read the mailbox, but do _not_ reenable the mb (to 574 * scheduled. We read the mailbox, but do _not_ reenable the mb (to
553 * receive another message). 575 * receive another message).
554 * 576 *
555 * lower mbxs upper 577 * lower mbxs upper
556 * ______^______ __^__ 578 * ____^______ __^__
557 * / \ / \ 579 * / \ / \
558 * +-+-+-+-+-+-+-+-++-+-+-+-+ 580 * +-+-+-+-+-+-+-+-++-+-+-+-+
559 * |x|x|x|x|x|x|x|x|| | | | | 581 * | |x|x|x|x|x|x|x|| | | | |
560 * +-+-+-+-+-+-+-+-++-+-+-+-+ 582 * +-+-+-+-+-+-+-+-++-+-+-+-+
561 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail 583 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
562 * 0 1 2 3 4 5 6 7 8 9 0 1 / box 584 * 0 1 2 3 4 5 6 7 8 9 0 1 / box
585 * ^
586 * |
587 * \
588 * unused, due to chip bug
563 * 589 *
564 * The variable priv->rx_next points to the next mailbox to read a 590 * The variable priv->rx_next points to the next mailbox to read a
565 * message from. As long we're in the lower mailboxes we just read the 591 * message from. As long we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
590 "order of incoming frames cannot be guaranteed\n"); 616 "order of incoming frames cannot be guaranteed\n");
591 617
592 again: 618 again:
593 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); 619 for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
594 mb < AT91_MB_RX_NUM && quota > 0; 620 mb < AT91_MB_RX_LAST + 1 && quota > 0;
595 reg_sr = at91_read(priv, AT91_SR), 621 reg_sr = at91_read(priv, AT91_SR),
596 mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) { 622 mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
597 at91_read_msg(dev, mb); 623 at91_read_msg(dev, mb);
598 624
599 /* reactivate mailboxes */ 625 /* reactivate mailboxes */
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
610 636
611 /* upper group completed, look again in lower */ 637 /* upper group completed, look again in lower */
612 if (priv->rx_next > AT91_MB_RX_LOW_LAST && 638 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
613 quota > 0 && mb >= AT91_MB_RX_NUM) { 639 quota > 0 && mb > AT91_MB_RX_LAST) {
614 priv->rx_next = 0; 640 priv->rx_next = AT91_MB_RX_FIRST;
615 goto again; 641 goto again;
616 } 642 }
617 643
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = {
1037 .ndo_start_xmit = at91_start_xmit, 1063 .ndo_start_xmit = at91_start_xmit,
1038}; 1064};
1039 1065
1066static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
1067 struct device_attribute *attr, char *buf)
1068{
1069 struct at91_priv *priv = netdev_priv(to_net_dev(dev));
1070
1071 if (priv->mb0_id & CAN_EFF_FLAG)
1072 return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
1073 else
1074 return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
1075}
1076
1077static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
1078 struct device_attribute *attr, const char *buf, size_t count)
1079{
1080 struct net_device *ndev = to_net_dev(dev);
1081 struct at91_priv *priv = netdev_priv(ndev);
1082 unsigned long can_id;
1083 ssize_t ret;
1084 int err;
1085
1086 rtnl_lock();
1087
1088 if (ndev->flags & IFF_UP) {
1089 ret = -EBUSY;
1090 goto out;
1091 }
1092
1093 err = strict_strtoul(buf, 0, &can_id);
1094 if (err) {
1095 ret = err;
1096 goto out;
1097 }
1098
1099 if (can_id & CAN_EFF_FLAG)
1100 can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
1101 else
1102 can_id &= CAN_SFF_MASK;
1103
1104 priv->mb0_id = can_id;
1105 ret = count;
1106
1107 out:
1108 rtnl_unlock();
1109 return ret;
1110}
1111
1112static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
1113 at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
1114
1115static struct attribute *at91_sysfs_attrs[] = {
1116 &dev_attr_mb0_id.attr,
1117 NULL,
1118};
1119
1120static struct attribute_group at91_sysfs_attr_group = {
1121 .attrs = at91_sysfs_attrs,
1122};
1123
1040static int __devinit at91_can_probe(struct platform_device *pdev) 1124static int __devinit at91_can_probe(struct platform_device *pdev)
1041{ 1125{
1042 struct net_device *dev; 1126 struct net_device *dev;
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
1082 dev->netdev_ops = &at91_netdev_ops; 1166 dev->netdev_ops = &at91_netdev_ops;
1083 dev->irq = irq; 1167 dev->irq = irq;
1084 dev->flags |= IFF_ECHO; 1168 dev->flags |= IFF_ECHO;
1169 dev->sysfs_groups[0] = &at91_sysfs_attr_group;
1085 1170
1086 priv = netdev_priv(dev); 1171 priv = netdev_priv(dev);
1087 priv->can.clock.freq = clk_get_rate(clk); 1172 priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
1093 priv->dev = dev; 1178 priv->dev = dev;
1094 priv->clk = clk; 1179 priv->clk = clk;
1095 priv->pdata = pdev->dev.platform_data; 1180 priv->pdata = pdev->dev.platform_data;
1181 priv->mb0_id = 0x7ff;
1096 1182
1097 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); 1183 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
1098 1184
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index b9a6d7a5a739..366f5cc050ae 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
1618 return count; 1618 return count;
1619} 1619}
1620 1620
1621static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term, 1621static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
1622 ican3_sysfs_set_term); 1622 ican3_sysfs_set_term);
1623 1623
1624static struct attribute *ican3_sysfs_attrs[] = { 1624static struct attribute *ican3_sysfs_attrs[] = {
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e97268248..e54712b22c27 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@ struct pch_can_priv {
185 185
186static struct can_bittiming_const pch_can_bittiming_const = { 186static struct can_bittiming_const pch_can_bittiming_const = {
187 .name = KBUILD_MODNAME, 187 .name = KBUILD_MODNAME,
188 .tseg1_min = 1, 188 .tseg1_min = 2,
189 .tseg1_max = 16, 189 .tseg1_max = 16,
190 .tseg2_min = 1, 190 .tseg2_min = 1,
191 .tseg2_max = 8, 191 .tseg2_max = 8,
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
959 struct pch_can_priv *priv = netdev_priv(ndev); 959 struct pch_can_priv *priv = netdev_priv(ndev);
960 960
961 unregister_candev(priv->ndev); 961 unregister_candev(priv->ndev);
962 pci_iounmap(pdev, priv->regs);
963 if (priv->use_msi) 962 if (priv->use_msi)
964 pci_disable_msi(priv->dev); 963 pci_disable_msi(priv->dev);
965 pci_release_regions(pdev); 964 pci_release_regions(pdev);
966 pci_disable_device(pdev); 965 pci_disable_device(pdev);
967 pci_set_drvdata(pdev, NULL); 966 pci_set_drvdata(pdev, NULL);
968 pch_can_reset(priv); 967 pch_can_reset(priv);
968 pci_iounmap(pdev, priv->regs);
969 free_candev(priv->ndev); 969 free_candev(priv->ndev);
970} 970}
971 971
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1238 priv->use_msi = 0; 1238 priv->use_msi = 0;
1239 } else { 1239 } else {
1240 netdev_err(ndev, "PCH CAN opened with MSI\n"); 1240 netdev_err(ndev, "PCH CAN opened with MSI\n");
1241 pci_set_master(pdev);
1241 priv->use_msi = 1; 1242 priv->use_msi = 1;
1242 } 1243 }
1243 1244
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 000000000000..8ba81b3ddd90
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
1config CAN_SOFTING
2 tristate "Softing Gmbh CAN generic support"
3 depends on CAN_DEV && HAS_IOMEM
4 ---help---
5 Support for CAN cards from Softing Gmbh & some cards
6 from Vector Gmbh.
7 Softing Gmbh CAN cards come with 1 or 2 physical busses.
8 Those cards typically use Dual Port RAM to communicate
9 with the host CPU. The interface is then identical for PCI
10 and PCMCIA cards. This driver operates on a platform device,
11 which has been created by softing_cs or softing_pci driver.
12 Warning:
13 The API of the card does not allow fine control per bus, but
14 controls the 2 busses on the card together.
15 As such, some actions (start/stop/busoff recovery) on 1 bus
16 must bring down the other bus too temporarily.
17
18config CAN_SOFTING_CS
19 tristate "Softing Gmbh CAN pcmcia cards"
20 depends on PCMCIA
21 select CAN_SOFTING
22 ---help---
23 Support for PCMCIA cards from Softing Gmbh & some cards
24 from Vector Gmbh.
25 You need firmware for these, which you can get at
26 http://developer.berlios.de/projects/socketcan/
27 This version of the driver is written against
28 firmware version 4.6 (softing-fw-4.6-binaries.tar.gz)
29 In order to use the card as CAN device, you need the Softing generic
30 support too.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 000000000000..c5e5016c742e
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
1
2softing-y := softing_main.o softing_fw.o
3obj-$(CONFIG_CAN_SOFTING) += softing.o
4obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
5
6ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 000000000000..7ec9f4db3d52
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
1/*
2 * softing common interfaces
3 *
4 * by Kurt Van Dijck, 2008-2010
5 */
6
7#include <linux/atomic.h>
8#include <linux/netdevice.h>
9#include <linux/ktime.h>
10#include <linux/mutex.h>
11#include <linux/spinlock.h>
12#include <linux/can.h>
13#include <linux/can/dev.h>
14
15#include "softing_platform.h"
16
17struct softing;
18
19struct softing_priv {
20 struct can_priv can; /* must be the first member! */
21 struct net_device *netdev;
22 struct softing *card;
23 struct {
24 int pending;
25 /* variables wich hold the circular buffer */
26 int echo_put;
27 int echo_get;
28 } tx;
29 struct can_bittiming_const btr_const;
30 int index;
31 uint8_t output;
32 uint16_t chip;
33};
34#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev))
35
36struct softing {
37 const struct softing_platform_data *pdat;
38 struct platform_device *pdev;
39 struct net_device *net[2];
40 spinlock_t spin; /* protect this structure & DPRAM access */
41 ktime_t ts_ref;
42 ktime_t ts_overflow; /* timestamp overflow value, in ktime */
43
44 struct {
45 /* indication of firmware status */
46 int up;
47 /* protection of the 'up' variable */
48 struct mutex lock;
49 } fw;
50 struct {
51 int nr;
52 int requested;
53 int svc_count;
54 unsigned int dpram_position;
55 } irq;
56 struct {
57 int pending;
58 int last_bus;
59 /*
60 * keep the bus that last tx'd a message,
61 * in order to let every netdev queue resume
62 */
63 } tx;
64 __iomem uint8_t *dpram;
65 unsigned long dpram_phys;
66 unsigned long dpram_size;
67 struct {
68 uint16_t fw_version, hw_version, license, serial;
69 uint16_t chip[2];
70 unsigned int freq; /* remote cpu's operating frequency */
71 } id;
72};
73
74extern int softing_default_output(struct net_device *netdev);
75
76extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
77
78extern int softing_chip_poweron(struct softing *card);
79
80extern int softing_bootloader_command(struct softing *card, int16_t cmd,
81 const char *msg);
82
83/* Load firmware after reset */
84extern int softing_load_fw(const char *file, struct softing *card,
85 __iomem uint8_t *virt, unsigned int size, int offset);
86
87/* Load final application firmware after bootloader */
88extern int softing_load_app_fw(const char *file, struct softing *card);
89
90/*
91 * enable or disable irq
92 * only called with fw.lock locked
93 */
94extern int softing_enable_irq(struct softing *card, int enable);
95
96/* start/stop 1 bus on card */
97extern int softing_startstop(struct net_device *netdev, int up);
98
99/* netif_rx() */
100extern int softing_netdev_rx(struct net_device *netdev,
101 const struct can_frame *msg, ktime_t ktime);
102
103/* SOFTING DPRAM mappings */
104#define DPRAM_RX 0x0000
105 #define DPRAM_RX_SIZE 32
106 #define DPRAM_RX_CNT 16
107#define DPRAM_RX_RD 0x0201 /* uint8_t */
108#define DPRAM_RX_WR 0x0205 /* uint8_t */
109#define DPRAM_RX_LOST 0x0207 /* uint8_t */
110
111#define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */
112#define DPRAM_FCT_RESULT 0x0328 /* int16_t */
113#define DPRAM_FCT_HOST 0x032b /* uint16_t */
114
115#define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */
116#define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */
117#define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */
118#define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */
119#define DPRAM_RESET 0x0341 /* uint16_t */
120#define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */
121#define DPRAM_RESET_TIME 0x034d /* uint16_t */
122#define DPRAM_TIME 0x0350 /* uint64_t */
123#define DPRAM_WR_START 0x0358 /* uint8_t */
124#define DPRAM_WR_END 0x0359 /* uint8_t */
125#define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */
126#define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */
127#define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */
128#define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */
129#define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */
130
131#define DPRAM_TX 0x0400 /* uint16_t */
132 #define DPRAM_TX_SIZE 16
133 #define DPRAM_TX_CNT 32
134#define DPRAM_TX_RD 0x0601 /* uint8_t */
135#define DPRAM_TX_WR 0x0605 /* uint8_t */
136
137#define DPRAM_COMMAND 0x07e0 /* uint16_t */
138#define DPRAM_RECEIPT 0x07f0 /* uint16_t */
139#define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */
140#define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */
141
142#define DPRAM_V2_RESET 0x0e00 /* uint8_t */
143#define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */
144
145#define TXMAX (DPRAM_TX_CNT - 1)
146
147/* DPRAM return codes */
148#define RES_NONE 0
149#define RES_OK 1
150#define RES_NOK 2
151#define RES_UNKNOWN 3
152/* DPRAM flags */
153#define CMD_TX 0x01
154#define CMD_ACK 0x02
155#define CMD_XTD 0x04
156#define CMD_RTR 0x08
157#define CMD_ERR 0x10
158#define CMD_BUS2 0x80
159
160/* returned fifo entry bus state masks */
161#define SF_MASK_BUSOFF 0x80
162#define SF_MASK_EPASSIVE 0x60
163
164/* bus states */
165#define STATE_BUSOFF 2
166#define STATE_EPASSIVE 1
167#define STATE_EACTIVE 0
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 000000000000..c11bb4de8630
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,360 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23
24#include <pcmcia/cistpl.h>
25#include <pcmcia/ds.h>
26
27#include "softing_platform.h"
28
29static int softingcs_index;
30static spinlock_t softingcs_index_lock;
31
32static int softingcs_reset(struct platform_device *pdev, int v);
33static int softingcs_enable_irq(struct platform_device *pdev, int v);
34
35/*
36 * platform_data descriptions
37 */
38#define MHZ (1000*1000)
39static const struct softing_platform_data softingcs_platform_data[] = {
40{
41 .name = "CANcard",
42 .manf = 0x0168, .prod = 0x001,
43 .generation = 1,
44 .nbus = 2,
45 .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
46 .dpram_size = 0x0800,
47 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
48 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
49 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
50 .reset = softingcs_reset,
51 .enable_irq = softingcs_enable_irq,
52}, {
53 .name = "CANcard-NEC",
54 .manf = 0x0168, .prod = 0x002,
55 .generation = 1,
56 .nbus = 2,
57 .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
58 .dpram_size = 0x0800,
59 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
60 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
61 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
62 .reset = softingcs_reset,
63 .enable_irq = softingcs_enable_irq,
64}, {
65 .name = "CANcard-SJA",
66 .manf = 0x0168, .prod = 0x004,
67 .generation = 1,
68 .nbus = 2,
69 .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
70 .dpram_size = 0x0800,
71 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
72 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
73 .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
74 .reset = softingcs_reset,
75 .enable_irq = softingcs_enable_irq,
76}, {
77 .name = "CANcard-2",
78 .manf = 0x0168, .prod = 0x005,
79 .generation = 2,
80 .nbus = 2,
81 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
82 .dpram_size = 0x1000,
83 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
84 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
85 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
86 .reset = softingcs_reset,
87 .enable_irq = NULL,
88}, {
89 .name = "Vector-CANcard",
90 .manf = 0x0168, .prod = 0x081,
91 .generation = 1,
92 .nbus = 2,
93 .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
94 .dpram_size = 0x0800,
95 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
96 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
97 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
98 .reset = softingcs_reset,
99 .enable_irq = softingcs_enable_irq,
100}, {
101 .name = "Vector-CANcard-SJA",
102 .manf = 0x0168, .prod = 0x084,
103 .generation = 1,
104 .nbus = 2,
105 .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
106 .dpram_size = 0x0800,
107 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
108 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
109 .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
110 .reset = softingcs_reset,
111 .enable_irq = softingcs_enable_irq,
112}, {
113 .name = "Vector-CANcard-2",
114 .manf = 0x0168, .prod = 0x085,
115 .generation = 2,
116 .nbus = 2,
117 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
118 .dpram_size = 0x1000,
119 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
120 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
121 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
122 .reset = softingcs_reset,
123 .enable_irq = NULL,
124}, {
125 .name = "EDICcard-NEC",
126 .manf = 0x0168, .prod = 0x102,
127 .generation = 1,
128 .nbus = 2,
129 .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
130 .dpram_size = 0x0800,
131 .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
132 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
133 .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
134 .reset = softingcs_reset,
135 .enable_irq = softingcs_enable_irq,
136}, {
137 .name = "EDICcard-2",
138 .manf = 0x0168, .prod = 0x105,
139 .generation = 2,
140 .nbus = 2,
141 .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
142 .dpram_size = 0x1000,
143 .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
144 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
145 .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
146 .reset = softingcs_reset,
147 .enable_irq = NULL,
148}, {
149 0, 0,
150},
151};
152
153MODULE_FIRMWARE(fw_dir "bcard.bin");
154MODULE_FIRMWARE(fw_dir "ldcard.bin");
155MODULE_FIRMWARE(fw_dir "cancard.bin");
156MODULE_FIRMWARE(fw_dir "cansja.bin");
157
158MODULE_FIRMWARE(fw_dir "bcard2.bin");
159MODULE_FIRMWARE(fw_dir "ldcard2.bin");
160MODULE_FIRMWARE(fw_dir "cancrd2.bin");
161
162static __devinit const struct softing_platform_data
163*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
164{
165 const struct softing_platform_data *lp;
166
167 for (lp = softingcs_platform_data; lp->manf; ++lp) {
168 if ((lp->manf == manf) && (lp->prod == prod))
169 return lp;
170 }
171 return NULL;
172}
173
174/*
175 * platformdata callbacks
176 */
177static int softingcs_reset(struct platform_device *pdev, int v)
178{
179 struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
180
181 dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
182 return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
183}
184
185static int softingcs_enable_irq(struct platform_device *pdev, int v)
186{
187 struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
188
189 dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
190 return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
191}
192
193/*
194 * pcmcia check
195 */
196static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
197 void *priv_data)
198{
199 struct softing_platform_data *pdat = priv_data;
200 struct resource *pres;
201 int memspeed = 0;
202
203 WARN_ON(!pdat);
204 pres = pcmcia->resource[PCMCIA_IOMEM_0];
205 if (resource_size(pres) < 0x1000)
206 return -ERANGE;
207
208 pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
209 if (pdat->generation < 2) {
210 pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
211 memspeed = 3;
212 } else {
213 pres->flags |= WIN_DATA_WIDTH_16;
214 }
215 return pcmcia_request_window(pcmcia, pres, memspeed);
216}
217
218static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
219{
220 struct platform_device *pdev = pcmcia->priv;
221
222 /* free bits */
223 platform_device_unregister(pdev);
224 /* release pcmcia stuff */
225 pcmcia_disable_device(pcmcia);
226}
227
228/*
229 * platform_device wrapper
230 * pdev->resource has 2 entries: io & irq
231 */
232static void softingcs_pdev_release(struct device *dev)
233{
234 struct platform_device *pdev = to_platform_device(dev);
235 kfree(pdev);
236}
237
238static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
239{
240 int ret;
241 struct platform_device *pdev;
242 const struct softing_platform_data *pdat;
243 struct resource *pres;
244 struct dev {
245 struct platform_device pdev;
246 struct resource res[2];
247 } *dev;
248
249 /* find matching platform_data */
250 pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
251 if (!pdat)
252 return -ENOTTY;
253
254 /* setup pcmcia device */
255 pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
256 CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
257 ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
258 if (ret)
259 goto pcmcia_failed;
260
261 ret = pcmcia_enable_device(pcmcia);
262 if (ret < 0)
263 goto pcmcia_failed;
264
265 pres = pcmcia->resource[PCMCIA_IOMEM_0];
266 if (!pres) {
267 ret = -EBADF;
268 goto pcmcia_bad;
269 }
270
271 /* create softing platform device */
272 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
273 if (!dev) {
274 ret = -ENOMEM;
275 goto mem_failed;
276 }
277 dev->pdev.resource = dev->res;
278 dev->pdev.num_resources = ARRAY_SIZE(dev->res);
279 dev->pdev.dev.release = softingcs_pdev_release;
280
281 pdev = &dev->pdev;
282 pdev->dev.platform_data = (void *)pdat;
283 pdev->dev.parent = &pcmcia->dev;
284 pcmcia->priv = pdev;
285
286 /* platform device resources */
287 pdev->resource[0].flags = IORESOURCE_MEM;
288 pdev->resource[0].start = pres->start;
289 pdev->resource[0].end = pres->end;
290
291 pdev->resource[1].flags = IORESOURCE_IRQ;
292 pdev->resource[1].start = pcmcia->irq;
293 pdev->resource[1].end = pdev->resource[1].start;
294
295 /* platform device setup */
296 spin_lock(&softingcs_index_lock);
297 pdev->id = softingcs_index++;
298 spin_unlock(&softingcs_index_lock);
299 pdev->name = "softing";
300 dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
301 ret = platform_device_register(pdev);
302 if (ret < 0)
303 goto platform_failed;
304
305 dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
306 return 0;
307
308platform_failed:
309 kfree(dev);
310mem_failed:
311pcmcia_bad:
312pcmcia_failed:
313 pcmcia_disable_device(pcmcia);
314 pcmcia->priv = NULL;
315 return ret ?: -ENODEV;
316}
317
318static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
319 /* softing */
320 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
321 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
322 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
323 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
324 /* vector, manufacturer? */
325 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
326 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
327 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
328 /* EDIC */
329 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
330 PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
331 PCMCIA_DEVICE_NULL,
332};
333
334MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
335
336static struct pcmcia_driver softingcs_driver = {
337 .owner = THIS_MODULE,
338 .name = "softingcs",
339 .id_table = softingcs_ids,
340 .probe = softingcs_probe,
341 .remove = __devexit_p(softingcs_remove),
342};
343
344static int __init softingcs_start(void)
345{
346 spin_lock_init(&softingcs_index_lock);
347 return pcmcia_register_driver(&softingcs_driver);
348}
349
350static void __exit softingcs_stop(void)
351{
352 pcmcia_unregister_driver(&softingcs_driver);
353}
354
355module_init(softingcs_start);
356module_exit(softingcs_stop);
357
358MODULE_DESCRIPTION("softing CANcard driver"
359 ", links PCMCIA card to softing driver");
360MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 000000000000..b520784fb197
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/firmware.h>
21#include <linux/sched.h>
22#include <asm/div64.h>
23
24#include "softing.h"
25
/*
 * low level DPRAM command.
 * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
 *
 * Writes @cmd/@vector into the DPRAM function mailbox and polls for up
 * to 1 second until the card acknowledges with RES_OK.  Returns the
 * 16-bit DPRAM result on success, -ETIMEDOUT when the card never
 * reacted (mailbox still RES_NONE), -ECANCELED otherwise.
 * Must be called from process context (sleeps between polls).
 */
static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
		const char *msg)
{
	int ret;
	unsigned long stamp;

	iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
	/* mailbox is written byte-wise, high byte first */
	iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
	iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
	/* be sure to flush this to the card */
	wmb();
	stamp = jiffies + 1 * HZ;
	/* wait for card */
	do {
		/* DPRAM_FCT_HOST is _not_ aligned */
		ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
			(ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
		/* don't have any cached variables */
		rmb();
		if (ret == RES_OK)
			/* read return-value now */
			return ioread16(&card->dpram[DPRAM_FCT_RESULT]);

		if ((ret != vector) || time_after(jiffies, stamp))
			break;
		/* process context => relax */
		usleep_range(500, 10000);
	} while (1);

	ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
	dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
	return ret;
}
63
64static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
65{
66 int ret;
67
68 ret = _softing_fct_cmd(card, cmd, 0, msg);
69 if (ret > 0) {
70 dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
71 ret = -EIO;
72 }
73 return ret;
74}
75
/*
 * Issue a bootloader command through the DPRAM command/receipt mailbox
 * and poll for up to 3 seconds until the card acknowledges with RES_OK.
 * Polling also stops when the calling process receives a signal.
 * Returns 0 on success, -ETIMEDOUT when the receipt stayed RES_NONE,
 * -ECANCELED on any other unexpected receipt value.
 */
int softing_bootloader_command(struct softing *card, int16_t cmd,
		const char *msg)
{
	int ret;
	unsigned long stamp;

	iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
	iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
	/* be sure to flush this to the card */
	wmb();
	stamp = jiffies + 3 * HZ;
	/* wait for card */
	do {
		ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
		/* don't have any cached variables */
		rmb();
		if (ret == RES_OK)
			return 0;
		if (time_after(jiffies, stamp))
			break;
		/* process context => relax */
		usleep_range(500, 10000);
	} while (!signal_pending(current));

	ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
	dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
	return ret;
}
104
/*
 * Decode one firmware record at *pmem, verify its additive 16-bit
 * checksum and advance *pmem past the record.
 * Outputs: *ptype (record type), *paddr (load address), *plen (payload
 * length), *pdat (pointer to the payload inside the stream).
 * Returns 0 on success, -EINVAL on checksum mismatch.
 */
static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
		uint16_t *plen, const uint8_t **pdat)
{
	uint16_t checksum[2];
	const uint8_t *mem;
	const uint8_t *end;

	/*
	 * firmware records are a binary, unaligned stream composed of:
	 * uint16_t type;
	 * uint32_t addr;
	 * uint16_t len;
	 * uint8_t dat[len];
	 * uint16_t checksum;
	 * all values in little endian.
	 * We could define a struct for this, with __attribute__((packed)),
	 * but would that solve the alignment in _all_ cases (cfr. the
	 * struct itself may be an odd address)?
	 *
	 * I chose to use leXX_to_cpup() since this solves both
	 * endianness & alignment.
	 */
	mem = *pmem;
	*ptype = le16_to_cpup((void *)&mem[0]);
	*paddr = le32_to_cpup((void *)&mem[2]);
	*plen = le16_to_cpup((void *)&mem[6]);
	*pdat = &mem[8];
	/* verify checksum: byte sum over header + payload, 16-bit wrap */
	end = &mem[8 + *plen];
	checksum[0] = le16_to_cpup((void *)end);
	for (checksum[1] = 0; mem < end; ++mem)
		checksum[1] += *mem;
	if (checksum[0] != checksum[1])
		return -EINVAL;
	/* increment: 8 bytes header + payload + 2 bytes checksum */
	*pmem += 10 + *plen;
	return 0;
}
143
144int softing_load_fw(const char *file, struct softing *card,
145 __iomem uint8_t *dpram, unsigned int size, int offset)
146{
147 const struct firmware *fw;
148 int ret;
149 const uint8_t *mem, *end, *dat;
150 uint16_t type, len;
151 uint32_t addr;
152 uint8_t *buf = NULL;
153 int buflen = 0;
154 int8_t type_end = 0;
155
156 ret = request_firmware(&fw, file, &card->pdev->dev);
157 if (ret < 0)
158 return ret;
159 dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
160 ", offset %c0x%04x\n",
161 card->pdat->name, file, (unsigned int)fw->size,
162 (offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
163 /* parse the firmware */
164 mem = fw->data;
165 end = &mem[fw->size];
166 /* look for header record */
167 ret = fw_parse(&mem, &type, &addr, &len, &dat);
168 if (ret < 0)
169 goto failed;
170 if (type != 0xffff)
171 goto failed;
172 if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) {
173 ret = -EINVAL;
174 goto failed;
175 }
176 /* ok, we had a header */
177 while (mem < end) {
178 ret = fw_parse(&mem, &type, &addr, &len, &dat);
179 if (ret < 0)
180 goto failed;
181 if (type == 3) {
182 /* start address, not used here */
183 continue;
184 } else if (type == 1) {
185 /* eof */
186 type_end = 1;
187 break;
188 } else if (type != 0) {
189 ret = -EINVAL;
190 goto failed;
191 }
192
193 if ((addr + len + offset) > size)
194 goto failed;
195 memcpy_toio(&dpram[addr + offset], dat, len);
196 /* be sure to flush caches from IO space */
197 mb();
198 if (len > buflen) {
199 /* align buflen */
200 buflen = (len + (1024-1)) & ~(1024-1);
201 buf = krealloc(buf, buflen, GFP_KERNEL);
202 if (!buf) {
203 ret = -ENOMEM;
204 goto failed;
205 }
206 }
207 /* verify record data */
208 memcpy_fromio(buf, &dpram[addr + offset], len);
209 if (memcmp(buf, dat, len)) {
210 /* is not ok */
211 dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
212 ret = -EIO;
213 goto failed;
214 }
215 }
216 if (!type_end)
217 /* no end record seen */
218 goto failed;
219 ret = 0;
220failed:
221 kfree(buf);
222 release_firmware(fw);
223 if (ret < 0)
224 dev_info(&card->pdev->dev, "firmware %s failed\n", file);
225 return ret;
226}
227
228int softing_load_app_fw(const char *file, struct softing *card)
229{
230 const struct firmware *fw;
231 const uint8_t *mem, *end, *dat;
232 int ret, j;
233 uint16_t type, len;
234 uint32_t addr, start_addr = 0;
235 unsigned int sum, rx_sum;
236 int8_t type_end = 0, type_entrypoint = 0;
237
238 ret = request_firmware(&fw, file, &card->pdev->dev);
239 if (ret) {
240 dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
241 file, ret);
242 return ret;
243 }
244 dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
245 file, (unsigned long)fw->size);
246 /* parse the firmware */
247 mem = fw->data;
248 end = &mem[fw->size];
249 /* look for header record */
250 ret = fw_parse(&mem, &type, &addr, &len, &dat);
251 if (ret)
252 goto failed;
253 ret = -EINVAL;
254 if (type != 0xffff) {
255 dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
256 type);
257 goto failed;
258 }
259 if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
260 dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
261 len, dat);
262 goto failed;
263 }
264 /* ok, we had a header */
265 while (mem < end) {
266 ret = fw_parse(&mem, &type, &addr, &len, &dat);
267 if (ret)
268 goto failed;
269
270 if (type == 3) {
271 /* start address */
272 start_addr = addr;
273 type_entrypoint = 1;
274 continue;
275 } else if (type == 1) {
276 /* eof */
277 type_end = 1;
278 break;
279 } else if (type != 0) {
280 dev_alert(&card->pdev->dev,
281 "unknown record type 0x%04x\n", type);
282 ret = -EINVAL;
283 goto failed;
284 }
285
286 /* regualar data */
287 for (sum = 0, j = 0; j < len; ++j)
288 sum += dat[j];
289 /* work in 16bit (target) */
290 sum &= 0xffff;
291
292 memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
293 iowrite32(card->pdat->app.offs + card->pdat->app.addr,
294 &card->dpram[DPRAM_COMMAND + 2]);
295 iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
296 iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
297 iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
298 ret = softing_bootloader_command(card, 1, "loading app.");
299 if (ret < 0)
300 goto failed;
301 /* verify checksum */
302 rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
303 if (rx_sum != sum) {
304 dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
305 ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
306 ret = -EIO;
307 goto failed;
308 }
309 }
310 if (!type_end || !type_entrypoint)
311 goto failed;
312 /* start application in card */
313 iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
314 iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
315 ret = softing_bootloader_command(card, 3, "start app.");
316 if (ret < 0)
317 goto failed;
318 ret = 0;
319failed:
320 release_firmware(fw);
321 if (ret < 0)
322 dev_info(&card->pdev->dev, "firmware %s failed\n", file);
323 return ret;
324}
325
/*
 * Reset the CAN controllers via the "reset_can" firmware command.
 * Retries indefinitely until the command succeeds or the calling
 * process receives a signal; also clears the card-wide tx pending
 * counter.  Returns 0 on success or the last command's error code.
 */
static int softing_reset_chip(struct softing *card)
{
	int ret;

	do {
		/* reset chip */
		iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
		iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
		iowrite8(1, &card->dpram[DPRAM_RESET]);
		iowrite8(0, &card->dpram[DPRAM_RESET+1]);

		ret = softing_fct_cmd(card, 0, "reset_can");
		if (!ret)
			break;
		if (signal_pending(current))
			/* don't wait any longer */
			break;
	} while (1);
	/* no frames can be pending after a controller reset */
	card->tx.pending = 0;
	return ret;
}
347
348int softing_chip_poweron(struct softing *card)
349{
350 int ret;
351 /* sync */
352 ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
353 if (ret < 0)
354 goto failed;
355
356 ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
357 if (ret < 0)
358 goto failed;
359
360 ret = softing_reset_chip(card);
361 if (ret < 0)
362 goto failed;
363 /* get_serial */
364 ret = softing_fct_cmd(card, 43, "get_serial_number");
365 if (ret < 0)
366 goto failed;
367 card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
368 /* get_version */
369 ret = softing_fct_cmd(card, 12, "get_version");
370 if (ret < 0)
371 goto failed;
372 card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
373 card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
374 card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
375 card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
376 card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
377 return 0;
378failed:
379 return ret;
380}
381
/*
 * Reset the card's timestamp reference point to 'now' and precompute
 * the period (in us) after which the card's 32-bit raw timestamp
 * counter wraps, scaled by the card clock (pdat->freq, 16 MHz when 0).
 */
static void softing_initialize_timestamp(struct softing *card)
{
	uint64_t ovf;

	card->ts_ref = ktime_get();

	/* 16MHz is the reference */
	ovf = 0x100000000ULL * 16;
	do_div(ovf, card->pdat->freq ?: 16);

	/* ovf is in microseconds here (cfr. softing_raw2ktime scaling) */
	card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
}
394
395ktime_t softing_raw2ktime(struct softing *card, u32 raw)
396{
397 uint64_t rawl;
398 ktime_t now, real_offset;
399 ktime_t target;
400 ktime_t tmp;
401
402 now = ktime_get();
403 real_offset = ktime_sub(ktime_get_real(), now);
404
405 /* find nsec from card */
406 rawl = raw * 16;
407 do_div(rawl, card->pdat->freq ?: 16);
408 target = ktime_add_us(card->ts_ref, rawl);
409 /* test for overflows */
410 tmp = ktime_add(target, card->ts_overflow);
411 while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) {
412 card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
413 target = tmp;
414 tmp = ktime_add(target, card->ts_overflow);
415 }
416 return ktime_add(target, real_offset);
417}
418
419static inline int softing_error_reporting(struct net_device *netdev)
420{
421 struct softing_priv *priv = netdev_priv(netdev);
422
423 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
424 ? 1 : 0;
425}
426
/*
 * Stop all CAN busses of a card and restart the ones that were
 * running (plus @dev itself when @up is nonzero).
 * @dev: the netdev this request originates from
 * @up: nonzero to (re)start, 0 to stop only
 *
 * The firmware configures both busses with one command sequence, so a
 * full down/up cycle of the whole card is required for any change.
 * Serializes on card->fw.lock.  Returns 0 or a negative errno; on
 * failure the card is reset and all its netdevs are closed.
 */
int softing_startstop(struct net_device *dev, int up)
{
	int ret;
	struct softing *card;
	struct softing_priv *priv;
	struct net_device *netdev;
	int bus_bitmask_start;
	int j, error_reporting;
	struct can_frame msg;
	const struct can_bittiming *bt;

	priv = netdev_priv(dev);
	card = priv->card;

	if (!card->fw.up)
		return -EIO;

	ret = mutex_lock_interruptible(&card->fw.lock);
	if (ret)
		return ret;

	bus_bitmask_start = 0;
	if (dev && up)
		/* prepare to start this bus as well */
		bus_bitmask_start |= (1 << priv->index);
	/* bring netdevs down */
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		netdev = card->net[j];
		if (!netdev)
			continue;
		priv = netdev_priv(netdev);

		if (dev != netdev)
			netif_stop_queue(netdev);

		if (netif_running(netdev)) {
			if (dev != netdev)
				/* remember to restart this bus later */
				bus_bitmask_start |= (1 << j);
			priv->tx.pending = 0;
			priv->tx.echo_put = 0;
			priv->tx.echo_get = 0;
			/*
			 * this bus' may just have called open_candev()
			 * which is rather stupid to call close_candev()
			 * already
			 * but we may come here from busoff recovery too
			 * in which case the echo_skb _needs_ flushing too.
			 * just be sure to call open_candev() again
			 */
			close_candev(netdev);
		}
		priv->can.state = CAN_STATE_STOPPED;
	}
	card->tx.pending = 0;

	softing_enable_irq(card, 0);
	ret = softing_reset_chip(card);
	if (ret)
		goto failed;
	if (!bus_bitmask_start)
		/* no busses to be brought up */
		goto card_done;

	/* the firmware has a single err_reporting flag for both busses */
	if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
			&& (softing_error_reporting(card->net[0])
				!= softing_error_reporting(card->net[1]))) {
		dev_alert(&card->pdev->dev,
				"err_reporting flag differs for busses\n");
		goto invalid;
	}
	error_reporting = 0;
	if (bus_bitmask_start & 1) {
		netdev = card->net[0];
		priv = netdev_priv(netdev);
		error_reporting += softing_error_reporting(netdev);
		/* init chip 1 */
		bt = &priv->can.bittiming;
		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
		iowrite16(bt->phase_seg1 + bt->prop_seg,
				&card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
				&card->dpram[DPRAM_FCT_PARAM + 10]);
		ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
		if (ret < 0)
			goto failed;
		/* set mode */
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
		ret = softing_fct_cmd(card, 3, "set_mode[0]");
		if (ret < 0)
			goto failed;
		/* set filter */
		/* 11bit id & mask */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
		ret = softing_fct_cmd(card, 7, "set_filter[0]");
		if (ret < 0)
			goto failed;
		/* set output control */
		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
		ret = softing_fct_cmd(card, 5, "set_output[0]");
		if (ret < 0)
			goto failed;
	}
	if (bus_bitmask_start & 2) {
		netdev = card->net[1];
		priv = netdev_priv(netdev);
		error_reporting += softing_error_reporting(netdev);
		/* init chip2 */
		bt = &priv->can.bittiming;
		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
		iowrite16(bt->phase_seg1 + bt->prop_seg,
				&card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
				&card->dpram[DPRAM_FCT_PARAM + 10]);
		ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
		if (ret < 0)
			goto failed;
		/* set mode2 */
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
		ret = softing_fct_cmd(card, 4, "set_mode[1]");
		if (ret < 0)
			goto failed;
		/* set filter2 */
		/* 11bit id & mask */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
		ret = softing_fct_cmd(card, 8, "set_filter[1]");
		if (ret < 0)
			goto failed;
		/* set output control2 */
		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
		ret = softing_fct_cmd(card, 6, "set_output[1]");
		if (ret < 0)
			goto failed;
	}
	/* enable_error_frame */
	/*
	 * Error reporting is switched off at the moment since
	 * the receiving of them is not yet 100% verified
	 * This should be enabled sooner or later
	 *
	if (error_reporting) {
		ret = softing_fct_cmd(card, 51, "enable_error_frame");
		if (ret < 0)
			goto failed;
	}
	*/
	/* initialize interface */
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
	ret = softing_fct_cmd(card, 17, "initialize_interface");
	if (ret < 0)
		goto failed;
	/* enable_fifo */
	ret = softing_fct_cmd(card, 36, "enable_fifo");
	if (ret < 0)
		goto failed;
	/* enable fifo tx ack */
	ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
	if (ret < 0)
		goto failed;
	/* enable fifo tx ack2 */
	ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
	if (ret < 0)
		goto failed;
	/* start_chip */
	ret = softing_fct_cmd(card, 11, "start_chip");
	if (ret < 0)
		goto failed;
	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
	/* NOTE(review): a V2 register is written for generation < 2 here;
	 * looks inverted -- confirm against the hardware documentation */
	if (card->pdat->generation < 2) {
		iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
		/* flush the DPRAM caches */
		wmb();
	}

	softing_initialize_timestamp(card);

	/*
	 * do socketcan notifications/status changes
	 * from here, no errors should occur, or the failed: part
	 * must be reviewed
	 */
	memset(&msg, 0, sizeof(msg));
	msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
	msg.can_dlc = CAN_ERR_DLC;
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		if (!(bus_bitmask_start & (1 << j)))
			continue;
		netdev = card->net[j];
		if (!netdev)
			continue;
		priv = netdev_priv(netdev);
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		open_candev(netdev);
		if (dev != netdev) {
			/* notify other busses on the restart */
			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
			++priv->can.can_stats.restarts;
		}
		netif_wake_queue(netdev);
	}

	/* enable interrupts */
	ret = softing_enable_irq(card, 1);
	if (ret)
		goto failed;
card_done:
	mutex_unlock(&card->fw.lock);
	return 0;
invalid:
	ret = -EINVAL;
failed:
	softing_enable_irq(card, 0);
	softing_reset_chip(card);
	mutex_unlock(&card->fw.lock);
	/* bring all other interfaces down */
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		netdev = card->net[j];
		if (!netdev)
			continue;
		dev_close(netdev);
	}
	return ret;
}
677
678int softing_default_output(struct net_device *netdev)
679{
680 struct softing_priv *priv = netdev_priv(netdev);
681 struct softing *card = priv->card;
682
683 switch (priv->chip) {
684 case 1000:
685 return (card->pdat->generation < 2) ? 0xfb : 0xfa;
686 case 5:
687 return 0x60;
688 default:
689 return 0x40;
690 }
691}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 000000000000..5157e15e96eb
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,893 @@
1/*
2 * Copyright (C) 2008-2010
3 *
4 * - Kurt Van Dijck, EIA Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the version 2 of the GNU General Public License
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/version.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24
25#include "softing.h"
26
/* per-bus echo-skb budget: the card-wide TX fifo (TXMAX) is shared
 * between both busses, so each gets roughly half, minus one */
#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)

/*
 * test is a specific CAN netdev
 * is online (ie. up 'n running, not sleeping, not busoff
 */
static inline int canif_is_active(struct net_device *netdev)
{
	struct can_priv *can = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;
	/* active == error-active or error-passive, i.e. not stopped/busoff */
	return (can->state <= CAN_STATE_ERROR_PASSIVE);
}
41
/* reset DPRAM */
/*
 * On generation-2 cards: clear bit 0 of the DPRAM_V2_RESET register
 * (read-modify-write under card->spin); no-op on older generations.
 * NOTE(review): bit0==0 asserting the reset is inferred from the
 * set/clr pairing -- confirm against the hardware manual.
 */
static inline void softing_set_reset_dpram(struct softing *card)
{
	if (card->pdat->generation >= 2) {
		spin_lock_bh(&card->spin);
		iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
				&card->dpram[DPRAM_V2_RESET]);
		spin_unlock_bh(&card->spin);
	}
}
52
/*
 * Counterpart of softing_set_reset_dpram(): set bit 0 of the
 * DPRAM_V2_RESET register on generation-2 cards; no-op otherwise.
 */
static inline void softing_clr_reset_dpram(struct softing *card)
{
	if (card->pdat->generation >= 2) {
		spin_lock_bh(&card->spin);
		iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
				&card->dpram[DPRAM_V2_RESET]);
		spin_unlock_bh(&card->spin);
	}
}
62
/* trigger the tx queue-ing */
/*
 * ndo_start_xmit: serialize the CAN frame into the card's DPRAM tx
 * fifo under card->spin and queue the skb for echo on tx completion.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY (and stops the queue) when
 * the firmware is down or any fifo/budget is exhausted.
 */
static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
		struct net_device *dev)
{
	struct softing_priv *priv = netdev_priv(dev);
	struct softing *card = priv->card;
	int ret;
	uint8_t *ptr;
	uint8_t fifo_wr, fifo_rd;
	struct can_frame *cf = (struct can_frame *)skb->data;
	uint8_t buf[DPRAM_TX_SIZE];

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	spin_lock(&card->spin);

	ret = NETDEV_TX_BUSY;
	if (!card->fw.up ||
			(card->tx.pending >= TXMAX) ||
			(priv->tx.pending >= TX_ECHO_SKB_MAX))
		goto xmit_done;
	fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
	fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
	if (fifo_wr == fifo_rd)
		/* fifo full */
		goto xmit_done;
	/* serialize the frame: cmd byte, dlc, id (2 or 4 bytes), data */
	memset(buf, 0, sizeof(buf));
	ptr = buf;
	*ptr = CMD_TX;
	if (cf->can_id & CAN_RTR_FLAG)
		*ptr |= CMD_RTR;
	if (cf->can_id & CAN_EFF_FLAG)
		*ptr |= CMD_XTD;
	if (priv->index)
		*ptr |= CMD_BUS2;
	++ptr;
	*ptr++ = cf->can_dlc;
	*ptr++ = (cf->can_id >> 0);
	*ptr++ = (cf->can_id >> 8);
	if (cf->can_id & CAN_EFF_FLAG) {
		*ptr++ = (cf->can_id >> 16);
		*ptr++ = (cf->can_id >> 24);
	} else {
		/* increment 1, not 2 as you might think */
		ptr += 1;
	}
	if (!(cf->can_id & CAN_RTR_FLAG))
		memcpy(ptr, &cf->data[0], cf->can_dlc);
	memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
			buf, DPRAM_TX_SIZE);
	/* advance the write pointer (circular) and publish it */
	if (++fifo_wr >= DPRAM_TX_CNT)
		fifo_wr = 0;
	iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
	card->tx.last_bus = priv->index;
	++card->tx.pending;
	++priv->tx.pending;
	can_put_echo_skb(skb, dev, priv->tx.echo_put);
	++priv->tx.echo_put;
	if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
		priv->tx.echo_put = 0;
	/* can_put_echo_skb() saves the skb, safe to return TX_OK */
	ret = NETDEV_TX_OK;
xmit_done:
	spin_unlock(&card->spin);
	/* card-wide fifo exhausted: stop every queue of this card */
	if (card->tx.pending >= TXMAX) {
		int j;
		for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
			if (card->net[j])
				netif_stop_queue(card->net[j]);
		}
	}
	if (ret != NETDEV_TX_OK)
		netif_stop_queue(dev);

	return ret;
}
140
/*
 * shortcut for skb delivery
 * Allocate a CAN skb on @netdev, copy @msg into it, stamp it with
 * @ktime and hand it to the stack.  Returns the netif_rx() verdict,
 * or -ENOMEM when no skb could be allocated.
 */
int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
		ktime_t ktime)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_skb(netdev, &cf);
	if (!skb)
		return -ENOMEM;
	memcpy(cf, msg, sizeof(*msg));
	skb->tstamp = ktime;
	return netif_rx(skb);
}
157
/*
 * softing_handle_1
 * pop 1 entry from the DPRAM queue, and process
 *
 * Handles an rx-overflow notification (if pending) and then at most
 * one fifo entry: a bus-state/error record, a tx acknowledge, or a
 * received CAN frame.  Returns the number of messages handled, 0 when
 * the fifo was empty.  Caller must hold card->spin.
 */
static int softing_handle_1(struct softing *card)
{
	struct net_device *netdev;
	struct softing_priv *priv;
	ktime_t ktime;
	struct can_frame msg;
	int cnt = 0, lost_msg;
	uint8_t fifo_rd, fifo_wr, cmd;
	uint8_t *ptr;
	uint32_t tmp_u32;
	uint8_t buf[DPRAM_RX_SIZE];

	memset(&msg, 0, sizeof(msg));
	/* test for lost msgs */
	lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
	if (lost_msg) {
		int j;
		/* reset condition */
		iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
		/* prepare msg */
		msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
		msg.can_dlc = CAN_ERR_DLC;
		msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		/*
		 * service to all busses, we don't know which it was applicable
		 * but only service busses that are online
		 */
		for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
			netdev = card->net[j];
			if (!netdev)
				continue;
			if (!canif_is_active(netdev))
				/* a dead bus has no overflows */
				continue;
			++netdev->stats.rx_over_errors;
			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
		}
		/* prepare for other use */
		memset(&msg, 0, sizeof(msg));
		++cnt;
	}

	fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
	fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);

	/* advance the read pointer (circular); equal pointers = empty */
	if (++fifo_rd >= DPRAM_RX_CNT)
		fifo_rd = 0;
	if (fifo_wr == fifo_rd)
		return cnt;

	memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
			DPRAM_RX_SIZE);
	mb();
	/* trigger dual port RAM */
	iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);

	ptr = buf;
	cmd = *ptr++;
	if (cmd == 0xff)
		/* not quite usefull, probably the card has got out */
		return 0;
	/* CMD_BUS2 selects the second bus of the card */
	netdev = card->net[0];
	if (cmd & CMD_BUS2)
		netdev = card->net[1];
	priv = netdev_priv(netdev);

	if (cmd & CMD_ERR) {
		/* bus state change / error record */
		uint8_t can_state, state;

		state = *ptr++;

		msg.can_id = CAN_ERR_FLAG;
		msg.can_dlc = CAN_ERR_DLC;

		if (state & SF_MASK_BUSOFF) {
			can_state = CAN_STATE_BUS_OFF;
			msg.can_id |= CAN_ERR_BUSOFF;
			state = STATE_BUSOFF;
		} else if (state & SF_MASK_EPASSIVE) {
			can_state = CAN_STATE_ERROR_PASSIVE;
			msg.can_id |= CAN_ERR_CRTL;
			msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
			state = STATE_EPASSIVE;
		} else {
			can_state = CAN_STATE_ERROR_ACTIVE;
			msg.can_id |= CAN_ERR_CRTL;
			state = STATE_EACTIVE;
		}
		/* update DPRAM */
		iowrite8(state, &card->dpram[priv->index ?
				DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
		/* timestamp */
		tmp_u32 = le32_to_cpup((void *)ptr);
		ptr += 4;
		ktime = softing_raw2ktime(card, tmp_u32);

		++netdev->stats.rx_errors;
		/* update internal status */
		if (can_state != priv->can.state) {
			priv->can.state = can_state;
			if (can_state == CAN_STATE_ERROR_PASSIVE)
				++priv->can.can_stats.error_passive;
			else if (can_state == CAN_STATE_BUS_OFF) {
				/* this calls can_close_cleanup() */
				can_bus_off(netdev);
				netif_stop_queue(netdev);
			}
			/* trigger socketcan */
			softing_netdev_rx(netdev, &msg, ktime);
		}

	} else {
		/* data frame: rx message or tx acknowledge */
		if (cmd & CMD_RTR)
			msg.can_id |= CAN_RTR_FLAG;
		msg.can_dlc = get_can_dlc(*ptr++);
		if (cmd & CMD_XTD) {
			msg.can_id |= CAN_EFF_FLAG;
			msg.can_id |= le32_to_cpup((void *)ptr);
			ptr += 4;
		} else {
			msg.can_id |= le16_to_cpup((void *)ptr);
			ptr += 2;
		}
		/* timestamp */
		tmp_u32 = le32_to_cpup((void *)ptr);
		ptr += 4;
		ktime = softing_raw2ktime(card, tmp_u32);
		if (!(msg.can_id & CAN_RTR_FLAG))
			memcpy(&msg.data[0], ptr, 8);
		ptr += 8;
		/* update socket */
		if (cmd & CMD_ACK) {
			/* acknowledge, was tx msg */
			struct sk_buff *skb;
			skb = priv->can.echo_skb[priv->tx.echo_get];
			if (skb)
				skb->tstamp = ktime;
			can_get_echo_skb(netdev, priv->tx.echo_get);
			++priv->tx.echo_get;
			if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
				priv->tx.echo_get = 0;
			if (priv->tx.pending)
				--priv->tx.pending;
			if (card->tx.pending)
				--card->tx.pending;
			++netdev->stats.tx_packets;
			if (!(msg.can_id & CAN_RTR_FLAG))
				netdev->stats.tx_bytes += msg.can_dlc;
		} else {
			int ret;

			ret = softing_netdev_rx(netdev, &msg, ktime);
			if (ret == NET_RX_SUCCESS) {
				++netdev->stats.rx_packets;
				if (!(msg.can_id & CAN_RTR_FLAG))
					netdev->stats.rx_bytes += msg.can_dlc;
			} else {
				++netdev->stats.rx_dropped;
			}
		}
	}
	++cnt;
	return cnt;
}
326
/*
 * real interrupt handler
 *
 * Threaded half of the card IRQ: drain the DPRAM rx queue under
 * card->spin, then wake the tx queues of busses that have room again,
 * scanning round-robin starting after the bus that transmitted last.
 */
static irqreturn_t softing_irq_thread(int irq, void *dev_id)
{
	struct softing *card = (struct softing *)dev_id;
	struct net_device *netdev;
	struct softing_priv *priv;
	int j, offset, work_done;

	work_done = 0;
	spin_lock_bh(&card->spin);
	while (softing_handle_1(card) > 0) {
		++card->irq.svc_count;
		++work_done;
	}
	spin_unlock_bh(&card->spin);
	/* resume tx queue's */
	offset = card->tx.last_bus;
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		if (card->tx.pending >= TXMAX)
			break;
		netdev = card->net[(j + offset + 1) % card->pdat->nbus];
		if (!netdev)
			continue;
		priv = netdev_priv(netdev);
		if (!canif_is_active(netdev))
			/* it makes no sense to wake dead busses */
			continue;
		if (priv->tx.pending >= TX_ECHO_SKB_MAX)
			continue;
		++work_done;
		netif_wake_queue(netdev);
	}
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
363
364/*
365 * interrupt routines:
366 * schedule the 'real interrupt handler'
367 */
368static irqreturn_t softing_irq_v2(int irq, void *dev_id)
369{
370 struct softing *card = (struct softing *)dev_id;
371 uint8_t ir;
372
373 ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
374 iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
375 return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
376}
377
378static irqreturn_t softing_irq_v1(int irq, void *dev_id)
379{
380 struct softing *card = (struct softing *)dev_id;
381 uint8_t ir;
382
383 ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
384 iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
385 return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
386}
387
/*
 * netdev/candev inter-operability
 */
/*
 * ndo_open: validate/set the bittiming via open_candev(), then run the
 * card's start cycle.  Returns 0 or a negative errno.
 *
 * Fix over the original: when softing_startstop() fails, the candev
 * opened above was leaked (startstop's failure path only dev_close()s
 * interfaces that are already up, which this one is not yet) --
 * close_candev() must be called on the error path.
 */
static int softing_netdev_open(struct net_device *ndev)
{
	int ret;

	/* check or determine and set bittime */
	ret = open_candev(ndev);
	if (ret)
		return ret;
	ret = softing_startstop(ndev, 1);
	if (ret < 0)
		close_candev(ndev);
	return ret;
}
401
/*
 * ndo_stop: stop the queue and run the card's stop cycle, which
 * performs the close_candev() itself.
 */
static int softing_netdev_stop(struct net_device *ndev)
{
	netif_stop_queue(ndev);

	/* softing cycle does close_candev() */
	return softing_startstop(ndev, 0);
}
412
413static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
414{
415 int ret;
416
417 switch (mode) {
418 case CAN_MODE_START:
419 /* softing_startstop does close_candev() */
420 ret = softing_startstop(ndev, 1);
421 return ret;
422 case CAN_MODE_STOP:
423 case CAN_MODE_SLEEP:
424 return -EOPNOTSUPP;
425 }
426 return 0;
427}
428
/*
 * Softing device management helpers
 */
/*
 * Request or free the card's shared, threaded IRQ.
 * @enable: nonzero to request, 0 to free.
 * Idempotent: does nothing when already in the requested state or when
 * the card has no interrupt line.  Returns 0 or a negative errno.
 */
int softing_enable_irq(struct softing *card, int enable)
{
	int ret;

	if (!card->irq.nr) {
		return 0;
	} else if (card->irq.requested && !enable) {
		free_irq(card->irq.nr, card);
		card->irq.requested = 0;
	} else if (!card->irq.requested && enable) {
		/* hard handler depends on the card generation */
		ret = request_threaded_irq(card->irq.nr,
				(card->pdat->generation >= 2) ?
					softing_irq_v2 : softing_irq_v1,
				softing_irq_thread, IRQF_SHARED,
				dev_name(&card->pdev->dev), card);
		if (ret) {
			dev_alert(&card->pdev->dev,
					"request_threaded_irq(%u) failed\n",
					card->irq.nr);
			return ret;
		}
		card->irq.requested = 1;
	}
	return 0;
}
457
458static void softing_card_shutdown(struct softing *card)
459{
460 int fw_up = 0;
461
462 if (mutex_lock_interruptible(&card->fw.lock))
463 /* return -ERESTARTSYS */;
464 fw_up = card->fw.up;
465 card->fw.up = 0;
466
467 if (card->irq.requested && card->irq.nr) {
468 free_irq(card->irq.nr, card);
469 card->irq.requested = 0;
470 }
471 if (fw_up) {
472 if (card->pdat->enable_irq)
473 card->pdat->enable_irq(card->pdev, 0);
474 softing_set_reset_dpram(card);
475 if (card->pdat->reset)
476 card->pdat->reset(card->pdev, 1);
477 }
478 mutex_unlock(&card->fw.lock);
479}
480
/*
 * Reset the card, verify the DPRAM, then load boot-, loader- and
 * application firmware and power on the CAN controllers.
 * Returns 0 when the firmware is up (also when it already was),
 * -ERESTARTSYS on signal, -EIO on DPRAM test failure, or the error
 * of the firmware load / bootloader steps. On failure the card is
 * left in reset with irqs disabled.
 */
static __devinit int softing_card_boot(struct softing *card)
{
	int ret, j;
	/* 16-byte walking pattern used to test the DPRAM window */
	static const uint8_t stream[] = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
	unsigned char back[sizeof(stream)];

	if (mutex_lock_interruptible(&card->fw.lock))
		return -ERESTARTSYS;
	if (card->fw.up) {
		/* already booted, nothing to do */
		mutex_unlock(&card->fw.lock);
		return 0;
	}
	/* reset board */
	if (card->pdat->enable_irq)
		card->pdat->enable_irq(card->pdev, 1);
	/* boot card */
	softing_set_reset_dpram(card);
	if (card->pdat->reset)
		card->pdat->reset(card->pdev, 1);
	/*
	 * DPRAM read-back test, 16 bytes at a time.
	 * NOTE(review): with '<' the final sizeof(stream) bytes are never
	 * tested when dpram_size is a multiple of 16 — presumably
	 * intentional (reserved area?); confirm before changing.
	 */
	for (j = 0; (j + sizeof(stream)) < card->dpram_size;
			j += sizeof(stream)) {

		memcpy_toio(&card->dpram[j], stream, sizeof(stream));
		/* flush IO cache */
		mb();
		memcpy_fromio(back, &card->dpram[j], sizeof(stream));

		if (!memcmp(back, stream, sizeof(stream)))
			continue;
		/* memory is not equal */
		dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
		ret = -EIO;
		goto failed;
	}
	wmb();
	/* load boot firmware */
	ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
				card->dpram_size,
				card->pdat->boot.offs - card->pdat->boot.addr);
	if (ret < 0)
		goto failed;
	/* load loader firmware */
	ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
				card->dpram_size,
				card->pdat->load.offs - card->pdat->load.addr);
	if (ret < 0)
		goto failed;

	/* release reset so the loaded bootloader can run */
	if (card->pdat->reset)
		card->pdat->reset(card->pdev, 0);
	softing_clr_reset_dpram(card);
	ret = softing_bootloader_command(card, 0, "card boot");
	if (ret < 0)
		goto failed;
	/* the application firmware goes through the bootloader */
	ret = softing_load_app_fw(card->pdat->app.fw, card);
	if (ret < 0)
		goto failed;

	ret = softing_chip_poweron(card);
	if (ret < 0)
		goto failed;

	card->fw.up = 1;
	mutex_unlock(&card->fw.lock);
	return 0;
failed:
	/* leave the card disabled and in reset */
	card->fw.up = 0;
	if (card->pdat->enable_irq)
		card->pdat->enable_irq(card->pdev, 0);
	softing_set_reset_dpram(card);
	if (card->pdat->reset)
		card->pdat->reset(card->pdev, 1);
	mutex_unlock(&card->fw.lock);
	return ret;
}
557
558/*
559 * netdev sysfs
560 */
561static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
562 char *buf)
563{
564 struct net_device *ndev = to_net_dev(dev);
565 struct softing_priv *priv = netdev2softing(ndev);
566
567 return sprintf(buf, "%i\n", priv->index);
568}
569
570static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
571 char *buf)
572{
573 struct net_device *ndev = to_net_dev(dev);
574 struct softing_priv *priv = netdev2softing(ndev);
575
576 return sprintf(buf, "%i\n", priv->chip);
577}
578
579static ssize_t show_output(struct device *dev, struct device_attribute *attr,
580 char *buf)
581{
582 struct net_device *ndev = to_net_dev(dev);
583 struct softing_priv *priv = netdev2softing(ndev);
584
585 return sprintf(buf, "0x%02x\n", priv->output);
586}
587
588static ssize_t store_output(struct device *dev, struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 struct net_device *ndev = to_net_dev(dev);
592 struct softing_priv *priv = netdev2softing(ndev);
593 struct softing *card = priv->card;
594 unsigned long val;
595 int ret;
596
597 ret = strict_strtoul(buf, 0, &val);
598 if (ret < 0)
599 return ret;
600 val &= 0xFF;
601
602 ret = mutex_lock_interruptible(&card->fw.lock);
603 if (ret)
604 return -ERESTARTSYS;
605 if (netif_running(ndev)) {
606 mutex_unlock(&card->fw.lock);
607 return -EBUSY;
608 }
609 priv->output = val;
610 mutex_unlock(&card->fw.lock);
611 return count;
612}
613
/* per-netdev sysfs attributes: channel index, chip id, output config */
static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);

static const struct attribute *const netdev_sysfs_attrs[] = {
	&dev_attr_channel.attr,
	&dev_attr_chip.attr,
	&dev_attr_output.attr,
	NULL,
};
static const struct attribute_group netdev_sysfs_group = {
	.name = NULL,
	/* cast drops const: struct attribute_group::attrs is not const */
	.attrs = (struct attribute **)netdev_sysfs_attrs,
};
628
/* netdev callbacks; open/stop drive the card's start/stop cycle */
static const struct net_device_ops softing_netdev_ops = {
	.ndo_open = softing_netdev_open,
	.ndo_stop = softing_netdev_stop,
	.ndo_start_xmit = softing_netdev_start_xmit,
};
634
/*
 * Bittiming limits template; sjw_max and brp_max are overruled per card
 * from platform data in softing_netdev_create().
 */
static const struct can_bittiming_const softing_btr_const = {
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4, /* overruled */
	.brp_min = 1,
	.brp_max = 32, /* overruled */
	.brp_inc = 1,
};
645
646
/*
 * Allocate and pre-configure a candev for one CAN channel of @card.
 * The device is not registered here; callers follow up with
 * softing_netdev_register(). Returns NULL on allocation failure.
 */
static __devinit struct net_device *softing_netdev_create(struct softing *card,
		uint16_t chip_id)
{
	struct net_device *netdev;
	struct softing_priv *priv;

	netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
	if (!netdev) {
		dev_alert(&card->pdev->dev, "alloc_candev failed\n");
		return NULL;
	}
	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->card = card;
	/* start from the template, then apply the per-card limits */
	memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
	priv->btr_const.brp_max = card->pdat->max_brp;
	priv->btr_const.sjw_max = card->pdat->max_sjw;
	priv->can.bittiming_const = &priv->btr_const;
	priv->can.clock.freq = 8000000;
	priv->chip = chip_id;
	priv->output = softing_default_output(netdev);
	SET_NETDEV_DEV(netdev, &card->pdev->dev);

	/* IFF_ECHO: the driver echoes sent frames itself */
	netdev->flags |= IFF_ECHO;
	netdev->netdev_ops = &softing_netdev_ops;
	priv->can.do_set_mode = softing_candev_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

	return netdev;
}
677
678static __devinit int softing_netdev_register(struct net_device *netdev)
679{
680 int ret;
681
682 netdev->sysfs_groups[0] = &netdev_sysfs_group;
683 ret = register_candev(netdev);
684 if (ret) {
685 dev_alert(&netdev->dev, "register failed\n");
686 return ret;
687 }
688 return 0;
689}
690
/* unregister and release a registered channel net_device */
static void softing_netdev_cleanup(struct net_device *netdev)
{
	unregister_candev(netdev);
	free_candev(netdev);
}
696
697/*
698 * sysfs for Platform device
699 */
/*
 * DEV_ATTR_RO(): generate a read-only (0444) sysfs attribute that
 * prints an unsigned integer member of struct softing, looked up via
 * the platform device's drvdata.
 */
#define DEV_ATTR_RO(name, member) \
static ssize_t show_##name(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
	return sprintf(buf, "%u\n", card->member); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

/* DEV_ATTR_RO_STR(): like DEV_ATTR_RO, but for string members */
#define DEV_ATTR_RO_STR(name, member) \
static ssize_t show_##name(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
	return sprintf(buf, "%s\n", card->member); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)
717
/* card-wide read-only attributes exposed on the platform device */
DEV_ATTR_RO(serial, id.serial);
DEV_ATTR_RO_STR(firmware, pdat->app.fw);
DEV_ATTR_RO(firmware_version, id.fw_version);
DEV_ATTR_RO_STR(hardware, pdat->name);
DEV_ATTR_RO(hardware_version, id.hw_version);
DEV_ATTR_RO(license, id.license);
DEV_ATTR_RO(frequency, id.freq);
DEV_ATTR_RO(txpending, tx.pending);

static struct attribute *softing_pdev_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_firmware.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_hardware.attr,
	&dev_attr_hardware_version.attr,
	&dev_attr_license.attr,
	&dev_attr_frequency.attr,
	&dev_attr_txpending.attr,
	NULL,
};

static const struct attribute_group softing_pdev_group = {
	.name = NULL,
	.attrs = softing_pdev_attrs,
};
743
744/*
745 * platform driver
746 */
747static __devexit int softing_pdev_remove(struct platform_device *pdev)
748{
749 struct softing *card = platform_get_drvdata(pdev);
750 int j;
751
752 /* first, disable card*/
753 softing_card_shutdown(card);
754
755 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
756 if (!card->net[j])
757 continue;
758 softing_netdev_cleanup(card->net[j]);
759 card->net[j] = NULL;
760 }
761 sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
762
763 iounmap(card->dpram);
764 kfree(card);
765 return 0;
766}
767
768static __devinit int softing_pdev_probe(struct platform_device *pdev)
769{
770 const struct softing_platform_data *pdat = pdev->dev.platform_data;
771 struct softing *card;
772 struct net_device *netdev;
773 struct softing_priv *priv;
774 struct resource *pres;
775 int ret;
776 int j;
777
778 if (!pdat) {
779 dev_warn(&pdev->dev, "no platform data\n");
780 return -EINVAL;
781 }
782 if (pdat->nbus > ARRAY_SIZE(card->net)) {
783 dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
784 return -EINVAL;
785 }
786
787 card = kzalloc(sizeof(*card), GFP_KERNEL);
788 if (!card)
789 return -ENOMEM;
790 card->pdat = pdat;
791 card->pdev = pdev;
792 platform_set_drvdata(pdev, card);
793 mutex_init(&card->fw.lock);
794 spin_lock_init(&card->spin);
795
796 ret = -EINVAL;
797 pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
798 if (!pres)
799 goto platform_resource_failed;;
800 card->dpram_phys = pres->start;
801 card->dpram_size = pres->end - pres->start + 1;
802 card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
803 if (!card->dpram) {
804 dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
805 goto ioremap_failed;
806 }
807
808 pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
809 if (pres)
810 card->irq.nr = pres->start;
811
812 /* reset card */
813 ret = softing_card_boot(card);
814 if (ret < 0) {
815 dev_alert(&pdev->dev, "failed to boot\n");
816 goto boot_failed;
817 }
818
819 /* only now, the chip's are known */
820 card->id.freq = card->pdat->freq;
821
822 ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
823 if (ret < 0) {
824 dev_alert(&card->pdev->dev, "sysfs failed\n");
825 goto sysfs_failed;
826 }
827
828 ret = -ENOMEM;
829 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
830 card->net[j] = netdev =
831 softing_netdev_create(card, card->id.chip[j]);
832 if (!netdev) {
833 dev_alert(&pdev->dev, "failed to make can[%i]", j);
834 goto netdev_failed;
835 }
836 priv = netdev_priv(card->net[j]);
837 priv->index = j;
838 ret = softing_netdev_register(netdev);
839 if (ret) {
840 free_candev(netdev);
841 card->net[j] = NULL;
842 dev_alert(&card->pdev->dev,
843 "failed to register can[%i]\n", j);
844 goto netdev_failed;
845 }
846 }
847 dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
848 return 0;
849
850netdev_failed:
851 for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
852 if (!card->net[j])
853 continue;
854 softing_netdev_cleanup(card->net[j]);
855 }
856 sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
857sysfs_failed:
858 softing_card_shutdown(card);
859boot_failed:
860 iounmap(card->dpram);
861ioremap_failed:
862platform_resource_failed:
863 kfree(card);
864 return ret;
865}
866
static struct platform_driver softing_driver = {
	.driver = {
		.name = "softing",
		.owner = THIS_MODULE,
	},
	.probe = softing_pdev_probe,
	.remove = __devexit_p(softing_pdev_remove),
};

/* auto-load this module when a "softing" platform device appears */
MODULE_ALIAS("platform:softing");
877
/* module init: register the platform driver */
static int __init softing_start(void)
{
	return platform_driver_register(&softing_driver);
}
882
/* module exit: unregister the platform driver */
static void __exit softing_stop(void)
{
	platform_driver_unregister(&softing_driver);
}

module_init(softing_start);
module_exit(softing_stop);

MODULE_DESCRIPTION("Softing DPRAM CAN driver");
MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 000000000000..ebbf69815623
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
1
2#include <linux/platform_device.h>
3
4#ifndef _SOFTING_DEVICE_H_
5#define _SOFTING_DEVICE_H_
6
7/* softing firmware directory prefix */
8#define fw_dir "softing-4.6/"
9
10struct softing_platform_data {
11 unsigned int manf;
12 unsigned int prod;
13 /*
14 * generation
15 * 1st with NEC or SJA1000
16 * 8bit, exclusive interrupt, ...
17 * 2nd only SJA1000
18 * 16bit, shared interrupt
19 */
20 int generation;
21 int nbus; /* # busses on device */
22 unsigned int freq; /* operating frequency in Hz */
23 unsigned int max_brp;
24 unsigned int max_sjw;
25 unsigned long dpram_size;
26 const char *name;
27 struct {
28 unsigned long offs;
29 unsigned long addr;
30 const char *fw;
31 } boot, load, app;
32 /*
33 * reset() function
34 * bring pdev in or out of reset, depending on value
35 */
36 int (*reset)(struct platform_device *pdev, int value);
37 int (*enable_irq)(struct platform_device *pdev, int value);
38};
39
40#endif
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 7206ab2cbbf8..3437613f0454 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3203,7 +3203,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3203 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ 3203 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3204 int mac_off = 0; 3204 int mac_off = 0;
3205 3205
3206#if defined(CONFIG_OF) 3206#if defined(CONFIG_SPARC)
3207 const unsigned char *addr; 3207 const unsigned char *addr;
3208#endif 3208#endif
3209 3209
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
3354 if (found & VPD_FOUND_MAC) 3354 if (found & VPD_FOUND_MAC)
3355 goto done; 3355 goto done;
3356 3356
3357#if defined(CONFIG_OF) 3357#if defined(CONFIG_SPARC)
3358 addr = of_get_property(cp->of_node, "local-mac-address", NULL); 3358 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3359 if (addr != NULL) { 3359 if (addr != NULL) {
3360 memcpy(dev_addr, addr, 6); 3360 memcpy(dev_addr, addr, 6);
@@ -5031,7 +5031,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5031 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : 5031 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5032 cassini_debug; 5032 cassini_debug;
5033 5033
5034#if defined(CONFIG_OF) 5034#if defined(CONFIG_SPARC)
5035 cp->of_node = pci_device_to_OF_node(pdev); 5035 cp->of_node = pci_device_to_OF_node(pdev);
5036#endif 5036#endif
5037 5037
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a2944566f..7ff170cbc7dc 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -699,13 +699,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
699static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 699static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
700{ 700{
701 int i; 701 int i;
702 u32 *page_table = dma->pgtbl; 702 __le32 *page_table = (__le32 *) dma->pgtbl;
703 703
704 for (i = 0; i < dma->num_pages; i++) { 704 for (i = 0; i < dma->num_pages; i++) {
705 /* Each entry needs to be in big endian format. */ 705 /* Each entry needs to be in big endian format. */
706 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 706 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
707 page_table++; 707 page_table++;
708 *page_table = (u32) dma->pg_map_arr[i]; 708 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
709 page_table++; 709 page_table++;
710 } 710 }
711} 711}
@@ -713,13 +713,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
713static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 713static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
714{ 714{
715 int i; 715 int i;
716 u32 *page_table = dma->pgtbl; 716 __le32 *page_table = (__le32 *) dma->pgtbl;
717 717
718 for (i = 0; i < dma->num_pages; i++) { 718 for (i = 0; i < dma->num_pages; i++) {
719 /* Each entry needs to be in little endian format. */ 719 /* Each entry needs to be in little endian format. */
720 *page_table = dma->pg_map_arr[i] & 0xffffffff; 720 *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
721 page_table++; 721 page_table++;
722 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 722 *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
723 page_table++; 723 page_table++;
724 } 724 }
725} 725}
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 059c1eec8c3f..ec35d458102c 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2710,6 +2710,8 @@ static int cxgb_open(struct net_device *dev)
2710 struct port_info *pi = netdev_priv(dev); 2710 struct port_info *pi = netdev_priv(dev);
2711 struct adapter *adapter = pi->adapter; 2711 struct adapter *adapter = pi->adapter;
2712 2712
2713 netif_carrier_off(dev);
2714
2713 if (!(adapter->flags & FULL_INIT_DONE)) { 2715 if (!(adapter->flags & FULL_INIT_DONE)) {
2714 err = cxgb_up(adapter); 2716 err = cxgb_up(adapter);
2715 if (err < 0) 2717 if (err < 0)
@@ -3661,7 +3663,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3661 pi->xact_addr_filt = -1; 3663 pi->xact_addr_filt = -1;
3662 pi->rx_offload = RX_CSO; 3664 pi->rx_offload = RX_CSO;
3663 pi->port_id = i; 3665 pi->port_id = i;
3664 netif_carrier_off(netdev);
3665 netdev->irq = pdev->irq; 3666 netdev->irq = pdev->irq;
3666 3667
3667 netdev->features |= NETIF_F_SG | TSO_FLAGS; 3668 netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1b48b68ad4fd..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
1094 } 1094 }
1095 } 1095 }
1096 /* Change buffer ownership for this last frame, back to the adapter */ 1096 /* Change buffer ownership for this last frame, back to the adapter */
1097 for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) { 1097 for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); 1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
1099 } 1099 }
1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); 1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
1103 /* 1103 /*
1104 ** Update entry information 1104 ** Update entry information
1105 */ 1105 */
1106 lp->rx_new = (++lp->rx_new) & lp->rxRingMask; 1106 lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
1107 } 1107 }
1108 1108
1109 return 0; 1109 return 0;
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
1148 } 1148 }
1149 1149
1150 /* Update all the pointers */ 1150 /* Update all the pointers */
1151 lp->tx_old = (++lp->tx_old) & lp->txRingMask; 1151 lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
1152 } 1152 }
1153 1153
1154 return 0; 1154 return 0;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff692..c05db6046050 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
1753 1753
1754 /* Free all the skbuffs in the queue. */ 1754 /* Free all the skbuffs in the queue. */
1755 for (i = 0; i < RX_RING_SIZE; i++) { 1755 for (i = 0; i < RX_RING_SIZE; i++) {
1756 np->rx_ring[i].status = 0;
1757 np->rx_ring[i].fraginfo = 0;
1758 skb = np->rx_skbuff[i]; 1756 skb = np->rx_skbuff[i];
1759 if (skb) { 1757 if (skb) {
1760 pci_unmap_single(np->pdev, 1758 pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
1763 dev_kfree_skb (skb); 1761 dev_kfree_skb (skb);
1764 np->rx_skbuff[i] = NULL; 1762 np->rx_skbuff[i] = NULL;
1765 } 1763 }
1764 np->rx_ring[i].status = 0;
1765 np->rx_ring[i].fraginfo = 0;
1766 } 1766 }
1767 for (i = 0; i < TX_RING_SIZE; i++) { 1767 for (i = 0; i < TX_RING_SIZE; i++) {
1768 skb = np->tx_skbuff[i]; 1768 skb = np->tx_skbuff[i];
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index aed223b1b897..7501d977d992 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
124 case M88E1000_I_PHY_ID: 124 case M88E1000_I_PHY_ID:
125 case M88E1011_I_PHY_ID: 125 case M88E1011_I_PHY_ID:
126 case M88E1111_I_PHY_ID: 126 case M88E1111_I_PHY_ID:
127 case M88E1118_E_PHY_ID:
127 hw->phy_type = e1000_phy_m88; 128 hw->phy_type = e1000_phy_m88;
128 break; 129 break;
129 case IGP01E1000_I_PHY_ID: 130 case IGP01E1000_I_PHY_ID:
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3222 break; 3223 break;
3223 case e1000_ce4100: 3224 case e1000_ce4100:
3224 if ((hw->phy_id == RTL8211B_PHY_ID) || 3225 if ((hw->phy_id == RTL8211B_PHY_ID) ||
3225 (hw->phy_id == RTL8201N_PHY_ID)) 3226 (hw->phy_id == RTL8201N_PHY_ID) ||
3227 (hw->phy_id == M88E1118_E_PHY_ID))
3226 match = true; 3228 match = true;
3227 break; 3229 break;
3228 case e1000_82541: 3230 case e1000_82541:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 196eeda2dd6c..c70b23d52284 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info {
2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
2918#define M88E1011_I_REV_4 0x04 2918#define M88E1011_I_REV_4 0x04
2919#define M88E1111_I_PHY_ID 0x01410CC0 2919#define M88E1111_I_PHY_ID 0x01410CC0
2920#define M88E1118_E_PHY_ID 0x01410E40
2920#define L1LXT971A_PHY_ID 0x001378E0 2921#define L1LXT971A_PHY_ID 0x001378E0
2921 2922
2922#define RTL8211B_PHY_ID 0x001CC910 2923#define RTL8211B_PHY_ID 0x001CC910
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index de69c54301c1..bfab14092d2c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3478,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data)
3478 struct e1000_hw *hw = &adapter->hw; 3478 struct e1000_hw *hw = &adapter->hw;
3479 u32 icr = er32(ICR); 3479 u32 icr = er32(ICR);
3480 3480
3481 if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) 3481 if (unlikely((!icr)))
3482 return IRQ_NONE; /* Not our interrupt */ 3482 return IRQ_NONE; /* Not our interrupt */
3483 3483
3484 /*
3485 * we might have caused the interrupt, but the above
3486 * read cleared it, and just in case the driver is
3487 * down there is nothing to do so return handled
3488 */
3489 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3490 return IRQ_HANDLED;
3491
3484 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3492 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3485 hw->get_link_status = 1; 3493 hw->get_link_status = 1;
3486 /* guard against interrupt when we're going down */ 3494 /* guard against interrupt when we're going down */
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 1397da118f0d..89a69035e538 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -1310,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1310 * apply workaround for hardware errata documented in errata 1310 * apply workaround for hardware errata documented in errata
1311 * docs Fixes issue where some error prone or unreliable PCIe 1311 * docs Fixes issue where some error prone or unreliable PCIe
1312 * completions are occurring, particularly with ASPM enabled. 1312 * completions are occurring, particularly with ASPM enabled.
1313 * Without fix, issue can cause tx timeouts. 1313 * Without fix, issue can cause Tx timeouts.
1314 */ 1314 */
1315 reg = er32(GCR2); 1315 reg = er32(GCR2);
1316 reg |= 1; 1316 reg |= 1;
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 360c91369f35..28519acacd2d 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel PRO/1000 Linux driver 3# Intel PRO/1000 Linux driver
4# Copyright(c) 1999 - 2008 Intel Corporation. 4# Copyright(c) 1999 - 2011 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 7245dc2e0b7c..13149983d07e 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 5255be753746..e610e1369053 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e45a61c8930a..2fefa820302b 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index f8ed03dab9b1..fa08b6336cfb 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index e774380c7cec..bc0860a598c9 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -102,7 +102,7 @@ enum e1e_registers {
102 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ 102 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
103 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ 103 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
104#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) 104#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
105 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ 105 E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
106 106
107/* Convenience macros 107/* Convenience macros
108 * 108 *
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 5bb65b7382db..fb46974cfec1 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index ff2872153b21..68aa1749bf66 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -533,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
533 mac->autoneg_failed = 1; 533 mac->autoneg_failed = 1;
534 return 0; 534 return 0;
535 } 535 }
536 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); 536 e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
537 537
538 /* Disable auto-negotiation in the TXCW register */ 538 /* Disable auto-negotiation in the TXCW register */
539 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); 539 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -556,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
556 * and disable forced link in the Device Control register 556 * and disable forced link in the Device Control register
557 * in an attempt to auto-negotiate with our link partner. 557 * in an attempt to auto-negotiate with our link partner.
558 */ 558 */
559 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); 559 e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
560 ew32(TXCW, mac->txcw); 560 ew32(TXCW, mac->txcw);
561 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 561 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
562 562
@@ -598,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
598 mac->autoneg_failed = 1; 598 mac->autoneg_failed = 1;
599 return 0; 599 return 0;
600 } 600 }
601 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); 601 e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
602 602
603 /* Disable auto-negotiation in the TXCW register */ 603 /* Disable auto-negotiation in the TXCW register */
604 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); 604 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -621,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
621 * and disable forced link in the Device Control register 621 * and disable forced link in the Device Control register
622 * in an attempt to auto-negotiate with our link partner. 622 * in an attempt to auto-negotiate with our link partner.
623 */ 623 */
624 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); 624 e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
625 ew32(TXCW, mac->txcw); 625 ew32(TXCW, mac->txcw);
626 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 626 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
627 627
@@ -800,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
800 * The possible values of the "fc" parameter are: 800 * The possible values of the "fc" parameter are:
801 * 0: Flow control is completely disabled 801 * 0: Flow control is completely disabled
802 * 1: Rx flow control is enabled (we can receive pause frames, 802 * 1: Rx flow control is enabled (we can receive pause frames,
803 * but not send pause frames). 803 * but not send pause frames).
804 * 2: Tx flow control is enabled (we can send pause frames but we 804 * 2: Tx flow control is enabled (we can send pause frames but we
805 * do not support receiving pause frames). 805 * do not support receiving pause frames).
806 * 3: Both Rx and Tx flow control (symmetric) are enabled. 806 * 3: Both Rx and Tx flow control (symmetric) are enabled.
807 */ 807 */
808 switch (hw->fc.current_mode) { 808 switch (hw->fc.current_mode) {
@@ -1031,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
1031 * The possible values of the "fc" parameter are: 1031 * The possible values of the "fc" parameter are:
1032 * 0: Flow control is completely disabled 1032 * 0: Flow control is completely disabled
1033 * 1: Rx flow control is enabled (we can receive pause 1033 * 1: Rx flow control is enabled (we can receive pause
1034 * frames but not send pause frames). 1034 * frames but not send pause frames).
1035 * 2: Tx flow control is enabled (we can send pause frames 1035 * 2: Tx flow control is enabled (we can send pause frames
1036 * frames but we do not receive pause frames). 1036 * frames but we do not receive pause frames).
1037 * 3: Both Rx and Tx flow control (symmetric) is enabled. 1037 * 3: Both Rx and Tx flow control (symmetric) is enabled.
1038 * other: No other values should be possible at this point. 1038 * other: No other values should be possible at this point.
1039 */ 1039 */
@@ -1189,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1189 } else { 1189 } else {
1190 hw->fc.current_mode = e1000_fc_rx_pause; 1190 hw->fc.current_mode = e1000_fc_rx_pause;
1191 e_dbg("Flow Control = " 1191 e_dbg("Flow Control = "
1192 "RX PAUSE frames only.\r\n"); 1192 "Rx PAUSE frames only.\r\n");
1193 } 1193 }
1194 } 1194 }
1195 /* 1195 /*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fa5b60452547..3065870cf2a7 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -77,17 +77,17 @@ struct e1000_reg_info {
77 char *name; 77 char *name;
78}; 78};
79 79
80#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ 80#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
81#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ 81#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
82#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ 82#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
83#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ 83#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
84#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ 84#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
85 85
86#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ 86#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
87#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ 87#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
88#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ 88#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
89#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ 89#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
90#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ 90#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
91 91
92static const struct e1000_reg_info e1000_reg_info_tbl[] = { 92static const struct e1000_reg_info e1000_reg_info_tbl[] = {
93 93
@@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
99 /* Interrupt Registers */ 99 /* Interrupt Registers */
100 {E1000_ICR, "ICR"}, 100 {E1000_ICR, "ICR"},
101 101
102 /* RX Registers */ 102 /* Rx Registers */
103 {E1000_RCTL, "RCTL"}, 103 {E1000_RCTL, "RCTL"},
104 {E1000_RDLEN, "RDLEN"}, 104 {E1000_RDLEN, "RDLEN"},
105 {E1000_RDH, "RDH"}, 105 {E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
115 {E1000_RDFTS, "RDFTS"}, 115 {E1000_RDFTS, "RDFTS"},
116 {E1000_RDFPC, "RDFPC"}, 116 {E1000_RDFPC, "RDFPC"},
117 117
118 /* TX Registers */ 118 /* Tx Registers */
119 {E1000_TCTL, "TCTL"}, 119 {E1000_TCTL, "TCTL"},
120 {E1000_TDBAL, "TDBAL"}, 120 {E1000_TDBAL, "TDBAL"},
121 {E1000_TDBAH, "TDBAH"}, 121 {E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
160 break; 160 break;
161 default: 161 default:
162 printk(KERN_INFO "%-15s %08x\n", 162 printk(KERN_INFO "%-15s %08x\n",
163 reginfo->name, __er32(hw, reginfo->ofs)); 163 reginfo->name, __er32(hw, reginfo->ofs));
164 return; 164 return;
165 } 165 }
166 166
@@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
171 printk(KERN_CONT "\n"); 171 printk(KERN_CONT "\n");
172} 172}
173 173
174
175/* 174/*
176 * e1000e_dump - Print registers, tx-ring and rx-ring 175 * e1000e_dump - Print registers, Tx-ring and Rx-ring
177 */ 176 */
178static void e1000e_dump(struct e1000_adapter *adapter) 177static void e1000e_dump(struct e1000_adapter *adapter)
179{ 178{
@@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter)
182 struct e1000_reg_info *reginfo; 181 struct e1000_reg_info *reginfo;
183 struct e1000_ring *tx_ring = adapter->tx_ring; 182 struct e1000_ring *tx_ring = adapter->tx_ring;
184 struct e1000_tx_desc *tx_desc; 183 struct e1000_tx_desc *tx_desc;
185 struct my_u0 { u64 a; u64 b; } *u0; 184 struct my_u0 {
185 u64 a;
186 u64 b;
187 } *u0;
186 struct e1000_buffer *buffer_info; 188 struct e1000_buffer *buffer_info;
187 struct e1000_ring *rx_ring = adapter->rx_ring; 189 struct e1000_ring *rx_ring = adapter->rx_ring;
188 union e1000_rx_desc_packet_split *rx_desc_ps; 190 union e1000_rx_desc_packet_split *rx_desc_ps;
189 struct e1000_rx_desc *rx_desc; 191 struct e1000_rx_desc *rx_desc;
190 struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1; 192 struct my_u1 {
193 u64 a;
194 u64 b;
195 u64 c;
196 u64 d;
197 } *u1;
191 u32 staterr; 198 u32 staterr;
192 int i = 0; 199 int i = 0;
193 200
@@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
198 if (netdev) { 205 if (netdev) {
199 dev_info(&adapter->pdev->dev, "Net device Info\n"); 206 dev_info(&adapter->pdev->dev, "Net device Info\n");
200 printk(KERN_INFO "Device Name state " 207 printk(KERN_INFO "Device Name state "
201 "trans_start last_rx\n"); 208 "trans_start last_rx\n");
202 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", 209 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
203 netdev->name, 210 netdev->name, netdev->state, netdev->trans_start,
204 netdev->state, 211 netdev->last_rx);
205 netdev->trans_start,
206 netdev->last_rx);
207 } 212 }
208 213
209 /* Print Registers */ 214 /* Print Registers */
@@ -214,26 +219,26 @@ static void e1000e_dump(struct e1000_adapter *adapter)
214 e1000_regdump(hw, reginfo); 219 e1000_regdump(hw, reginfo);
215 } 220 }
216 221
217 /* Print TX Ring Summary */ 222 /* Print Tx Ring Summary */
218 if (!netdev || !netif_running(netdev)) 223 if (!netdev || !netif_running(netdev))
219 goto exit; 224 goto exit;
220 225
221 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); 226 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
222 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" 227 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
223 " leng ntw timestamp\n"); 228 " leng ntw timestamp\n");
224 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; 229 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
225 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", 230 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
226 0, tx_ring->next_to_use, tx_ring->next_to_clean, 231 0, tx_ring->next_to_use, tx_ring->next_to_clean,
227 (unsigned long long)buffer_info->dma, 232 (unsigned long long)buffer_info->dma,
228 buffer_info->length, 233 buffer_info->length,
229 buffer_info->next_to_watch, 234 buffer_info->next_to_watch,
230 (unsigned long long)buffer_info->time_stamp); 235 (unsigned long long)buffer_info->time_stamp);
231 236
232 /* Print TX Rings */ 237 /* Print Tx Ring */
233 if (!netif_msg_tx_done(adapter)) 238 if (!netif_msg_tx_done(adapter))
234 goto rx_ring_summary; 239 goto rx_ring_summary;
235 240
236 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); 241 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
237 242
238 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 243 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
239 * 244 *
@@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
263 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 268 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
264 */ 269 */
265 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" 270 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
266 " [bi->dma ] leng ntw timestamp bi->skb " 271 " [bi->dma ] leng ntw timestamp bi->skb "
267 "<-- Legacy format\n"); 272 "<-- Legacy format\n");
268 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" 273 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
269 " [bi->dma ] leng ntw timestamp bi->skb " 274 " [bi->dma ] leng ntw timestamp bi->skb "
270 "<-- Ext Context format\n"); 275 "<-- Ext Context format\n");
271 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" 276 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
272 " [bi->dma ] leng ntw timestamp bi->skb " 277 " [bi->dma ] leng ntw timestamp bi->skb "
273 "<-- Ext Data format\n"); 278 "<-- Ext Data format\n");
274 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 279 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
275 tx_desc = E1000_TX_DESC(*tx_ring, i); 280 tx_desc = E1000_TX_DESC(*tx_ring, i);
276 buffer_info = &tx_ring->buffer_info[i]; 281 buffer_info = &tx_ring->buffer_info[i];
277 u0 = (struct my_u0 *)tx_desc; 282 u0 = (struct my_u0 *)tx_desc;
278 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " 283 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
279 "%04X %3X %016llX %p", 284 "%04X %3X %016llX %p",
280 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : 285 (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
281 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i, 286 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
282 (unsigned long long)le64_to_cpu(u0->a), 287 (unsigned long long)le64_to_cpu(u0->a),
283 (unsigned long long)le64_to_cpu(u0->b), 288 (unsigned long long)le64_to_cpu(u0->b),
284 (unsigned long long)buffer_info->dma, 289 (unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
296 301
297 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) 302 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
298 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 303 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
299 16, 1, phys_to_virt(buffer_info->dma), 304 16, 1, phys_to_virt(buffer_info->dma),
300 buffer_info->length, true); 305 buffer_info->length, true);
301 } 306 }
302 307
303 /* Print RX Rings Summary */ 308 /* Print Rx Ring Summary */
304rx_ring_summary: 309rx_ring_summary:
305 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); 310 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
306 printk(KERN_INFO "Queue [NTU] [NTC]\n"); 311 printk(KERN_INFO "Queue [NTU] [NTC]\n");
307 printk(KERN_INFO " %5d %5X %5X\n", 0, 312 printk(KERN_INFO " %5d %5X %5X\n", 0,
308 rx_ring->next_to_use, rx_ring->next_to_clean); 313 rx_ring->next_to_use, rx_ring->next_to_clean);
309 314
310 /* Print RX Rings */ 315 /* Print Rx Ring */
311 if (!netif_msg_rx_status(adapter)) 316 if (!netif_msg_rx_status(adapter))
312 goto exit; 317 goto exit;
313 318
314 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); 319 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
315 switch (adapter->rx_ps_pages) { 320 switch (adapter->rx_ps_pages) {
316 case 1: 321 case 1:
317 case 2: 322 case 2:
@@ -329,7 +334,7 @@ rx_ring_summary:
329 * +-----------------------------------------------------+ 334 * +-----------------------------------------------------+
330 */ 335 */
331 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " 336 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
332 "[buffer 1 63:0 ] " 337 "[buffer 1 63:0 ] "
333 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " 338 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
334 "[bi->skb] <-- Ext Pkt Split format\n"); 339 "[bi->skb] <-- Ext Pkt Split format\n");
335 /* [Extended] Receive Descriptor (Write-Back) Format 340 /* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@ rx_ring_summary:
344 * 63 48 47 32 31 20 19 0 349 * 63 48 47 32 31 20 19 0
345 */ 350 */
346 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " 351 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
347 "[vl l0 ee es] " 352 "[vl l0 ee es] "
348 "[ l3 l2 l1 hs] [reserved ] ---------------- " 353 "[ l3 l2 l1 hs] [reserved ] ---------------- "
349 "[bi->skb] <-- Ext Rx Write-Back format\n"); 354 "[bi->skb] <-- Ext Rx Write-Back format\n");
350 for (i = 0; i < rx_ring->count; i++) { 355 for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@ rx_ring_summary:
352 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); 357 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
353 u1 = (struct my_u1 *)rx_desc_ps; 358 u1 = (struct my_u1 *)rx_desc_ps;
354 staterr = 359 staterr =
355 le32_to_cpu(rx_desc_ps->wb.middle.status_error); 360 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
356 if (staterr & E1000_RXD_STAT_DD) { 361 if (staterr & E1000_RXD_STAT_DD) {
357 /* Descriptor Done */ 362 /* Descriptor Done */
358 printk(KERN_INFO "RWB[0x%03X] %016llX " 363 printk(KERN_INFO "RWB[0x%03X] %016llX "
359 "%016llX %016llX %016llX " 364 "%016llX %016llX %016llX "
360 "---------------- %p", i, 365 "---------------- %p", i,
361 (unsigned long long)le64_to_cpu(u1->a), 366 (unsigned long long)le64_to_cpu(u1->a),
362 (unsigned long long)le64_to_cpu(u1->b), 367 (unsigned long long)le64_to_cpu(u1->b),
363 (unsigned long long)le64_to_cpu(u1->c), 368 (unsigned long long)le64_to_cpu(u1->c),
364 (unsigned long long)le64_to_cpu(u1->d), 369 (unsigned long long)le64_to_cpu(u1->d),
365 buffer_info->skb); 370 buffer_info->skb);
366 } else { 371 } else {
367 printk(KERN_INFO "R [0x%03X] %016llX " 372 printk(KERN_INFO "R [0x%03X] %016llX "
368 "%016llX %016llX %016llX %016llX %p", i, 373 "%016llX %016llX %016llX %016llX %p", i,
369 (unsigned long long)le64_to_cpu(u1->a), 374 (unsigned long long)le64_to_cpu(u1->a),
370 (unsigned long long)le64_to_cpu(u1->b), 375 (unsigned long long)le64_to_cpu(u1->b),
371 (unsigned long long)le64_to_cpu(u1->c), 376 (unsigned long long)le64_to_cpu(u1->c),
372 (unsigned long long)le64_to_cpu(u1->d), 377 (unsigned long long)le64_to_cpu(u1->d),
373 (unsigned long long)buffer_info->dma, 378 (unsigned long long)buffer_info->dma,
374 buffer_info->skb); 379 buffer_info->skb);
375 380
376 if (netif_msg_pktdata(adapter)) 381 if (netif_msg_pktdata(adapter))
377 print_hex_dump(KERN_INFO, "", 382 print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@ rx_ring_summary:
400 * 63 48 47 40 39 32 31 16 15 0 405 * 63 48 47 40 39 32 31 16 15 0
401 */ 406 */
402 printk(KERN_INFO "Rl[desc] [address 63:0 ] " 407 printk(KERN_INFO "Rl[desc] [address 63:0 ] "
403 "[vl er S cks ln] [bi->dma ] [bi->skb] " 408 "[vl er S cks ln] [bi->dma ] [bi->skb] "
404 "<-- Legacy format\n"); 409 "<-- Legacy format\n");
405 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 410 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
406 rx_desc = E1000_RX_DESC(*rx_ring, i); 411 rx_desc = E1000_RX_DESC(*rx_ring, i);
407 buffer_info = &rx_ring->buffer_info[i]; 412 buffer_info = &rx_ring->buffer_info[i];
408 u0 = (struct my_u0 *)rx_desc; 413 u0 = (struct my_u0 *)rx_desc;
409 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " 414 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
410 "%016llX %p", i, 415 "%016llX %p", i,
411 (unsigned long long)le64_to_cpu(u0->a), 416 (unsigned long long)le64_to_cpu(u0->a),
412 (unsigned long long)le64_to_cpu(u0->b), 417 (unsigned long long)le64_to_cpu(u0->b),
413 (unsigned long long)buffer_info->dma, 418 (unsigned long long)buffer_info->dma,
414 buffer_info->skb); 419 buffer_info->skb);
415 if (i == rx_ring->next_to_use) 420 if (i == rx_ring->next_to_use)
416 printk(KERN_CONT " NTU\n"); 421 printk(KERN_CONT " NTU\n");
417 else if (i == rx_ring->next_to_clean) 422 else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@ rx_ring_summary:
421 426
422 if (netif_msg_pktdata(adapter)) 427 if (netif_msg_pktdata(adapter))
423 print_hex_dump(KERN_INFO, "", 428 print_hex_dump(KERN_INFO, "",
424 DUMP_PREFIX_ADDRESS, 429 DUMP_PREFIX_ADDRESS,
425 16, 1, phys_to_virt(buffer_info->dma), 430 16, 1,
426 adapter->rx_buffer_len, true); 431 phys_to_virt(buffer_info->dma),
432 adapter->rx_buffer_len, true);
427 } 433 }
428 } 434 }
429 435
@@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
450 * @skb: pointer to sk_buff to be indicated to stack 456 * @skb: pointer to sk_buff to be indicated to stack
451 **/ 457 **/
452static void e1000_receive_skb(struct e1000_adapter *adapter, 458static void e1000_receive_skb(struct e1000_adapter *adapter,
453 struct net_device *netdev, 459 struct net_device *netdev, struct sk_buff *skb,
454 struct sk_buff *skb,
455 u8 status, __le16 vlan) 460 u8 status, __le16 vlan)
456{ 461{
457 skb->protocol = eth_type_trans(skb, netdev); 462 skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
464} 469}
465 470
466/** 471/**
467 * e1000_rx_checksum - Receive Checksum Offload for 82543 472 * e1000_rx_checksum - Receive Checksum Offload
468 * @adapter: board private structure 473 * @adapter: board private structure
469 * @status_err: receive descriptor status and error fields 474 * @status_err: receive descriptor status and error fields
470 * @csum: receive descriptor csum field 475 * @csum: receive descriptor csum field
@@ -548,7 +553,7 @@ map_skb:
548 adapter->rx_buffer_len, 553 adapter->rx_buffer_len,
549 DMA_FROM_DEVICE); 554 DMA_FROM_DEVICE);
550 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 555 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
551 dev_err(&pdev->dev, "RX DMA map failed\n"); 556 dev_err(&pdev->dev, "Rx DMA map failed\n");
552 adapter->rx_dma_failed++; 557 adapter->rx_dma_failed++;
553 break; 558 break;
554 } 559 }
@@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
601 ps_page = &buffer_info->ps_pages[j]; 606 ps_page = &buffer_info->ps_pages[j];
602 if (j >= adapter->rx_ps_pages) { 607 if (j >= adapter->rx_ps_pages) {
603 /* all unused desc entries get hw null ptr */ 608 /* all unused desc entries get hw null ptr */
604 rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0); 609 rx_desc->read.buffer_addr[j + 1] =
610 ~cpu_to_le64(0);
605 continue; 611 continue;
606 } 612 }
607 if (!ps_page->page) { 613 if (!ps_page->page) {
@@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
617 if (dma_mapping_error(&pdev->dev, 623 if (dma_mapping_error(&pdev->dev,
618 ps_page->dma)) { 624 ps_page->dma)) {
619 dev_err(&adapter->pdev->dev, 625 dev_err(&adapter->pdev->dev,
620 "RX DMA page map failed\n"); 626 "Rx DMA page map failed\n");
621 adapter->rx_dma_failed++; 627 adapter->rx_dma_failed++;
622 goto no_buffers; 628 goto no_buffers;
623 } 629 }
@@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
627 * didn't change because each write-back 633 * didn't change because each write-back
628 * erases this info. 634 * erases this info.
629 */ 635 */
630 rx_desc->read.buffer_addr[j+1] = 636 rx_desc->read.buffer_addr[j + 1] =
631 cpu_to_le64(ps_page->dma); 637 cpu_to_le64(ps_page->dma);
632 } 638 }
633 639
634 skb = netdev_alloc_skb_ip_align(netdev, 640 skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
644 adapter->rx_ps_bsize0, 650 adapter->rx_ps_bsize0,
645 DMA_FROM_DEVICE); 651 DMA_FROM_DEVICE);
646 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 652 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
647 dev_err(&pdev->dev, "RX DMA map failed\n"); 653 dev_err(&pdev->dev, "Rx DMA map failed\n");
648 adapter->rx_dma_failed++; 654 adapter->rx_dma_failed++;
649 /* cleanup skb */ 655 /* cleanup skb */
650 dev_kfree_skb_any(skb); 656 dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
662 * such as IA-64). 668 * such as IA-64).
663 */ 669 */
664 wmb(); 670 wmb();
665 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); 671 writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
666 } 672 }
667 673
668 i++; 674 i++;
@@ -1106,11 +1112,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
1106 cleaned = 1; 1112 cleaned = 1;
1107 cleaned_count++; 1113 cleaned_count++;
1108 dma_unmap_single(&pdev->dev, buffer_info->dma, 1114 dma_unmap_single(&pdev->dev, buffer_info->dma,
1109 adapter->rx_ps_bsize0, 1115 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
1110 DMA_FROM_DEVICE);
1111 buffer_info->dma = 0; 1116 buffer_info->dma = 0;
1112 1117
1113 /* see !EOP comment in other rx routine */ 1118 /* see !EOP comment in other Rx routine */
1114 if (!(staterr & E1000_RXD_STAT_EOP)) 1119 if (!(staterr & E1000_RXD_STAT_EOP))
1115 adapter->flags2 |= FLAG2_IS_DISCARDING; 1120 adapter->flags2 |= FLAG2_IS_DISCARDING;
1116 1121
@@ -2610,7 +2615,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2610} 2615}
2611 2616
2612/** 2617/**
2613 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset 2618 * e1000_configure_tx - Configure Transmit Unit after Reset
2614 * @adapter: board private structure 2619 * @adapter: board private structure
2615 * 2620 *
2616 * Configure the Tx unit of the MAC after a reset. 2621 * Configure the Tx unit of the MAC after a reset.
@@ -2663,7 +2668,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2663 * hthresh = 1 ==> prefetch when one or more available 2668 * hthresh = 1 ==> prefetch when one or more available
2664 * pthresh = 0x1f ==> prefetch if internal cache 31 or less 2669 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2665 * BEWARE: this seems to work but should be considered first if 2670 * BEWARE: this seems to work but should be considered first if
2666 * there are tx hangs or other tx related bugs 2671 * there are Tx hangs or other Tx related bugs
2667 */ 2672 */
2668 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; 2673 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2669 ew32(TXDCTL(0), txdctl); 2674 ew32(TXDCTL(0), txdctl);
@@ -2877,7 +2882,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2877 if (adapter->rx_ps_pages) { 2882 if (adapter->rx_ps_pages) {
2878 /* this is a 32 byte descriptor */ 2883 /* this is a 32 byte descriptor */
2879 rdlen = rx_ring->count * 2884 rdlen = rx_ring->count *
2880 sizeof(union e1000_rx_desc_packet_split); 2885 sizeof(union e1000_rx_desc_packet_split);
2881 adapter->clean_rx = e1000_clean_rx_irq_ps; 2886 adapter->clean_rx = e1000_clean_rx_irq_ps;
2882 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 2887 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2883 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { 2888 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2900,7 +2905,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2900 /* 2905 /*
2901 * set the writeback threshold (only takes effect if the RDTR 2906 * set the writeback threshold (only takes effect if the RDTR
2902 * is set). set GRAN=1 and write back up to 0x4 worth, and 2907 * is set). set GRAN=1 and write back up to 0x4 worth, and
2903 * enable prefetching of 0x20 rx descriptors 2908 * enable prefetching of 0x20 Rx descriptors
2904 * granularity = 01 2909 * granularity = 01
2905 * wthresh = 04, 2910 * wthresh = 04,
2906 * hthresh = 04, 2911 * hthresh = 04,
@@ -2981,12 +2986,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2981 * excessive C-state transition latencies result in 2986 * excessive C-state transition latencies result in
2982 * dropped transactions. 2987 * dropped transactions.
2983 */ 2988 */
2984 pm_qos_update_request( 2989 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
2985 &adapter->netdev->pm_qos_req, 55);
2986 } else { 2990 } else {
2987 pm_qos_update_request( 2991 pm_qos_update_request(&adapter->netdev->pm_qos_req,
2988 &adapter->netdev->pm_qos_req, 2992 PM_QOS_DEFAULT_VALUE);
2989 PM_QOS_DEFAULT_VALUE);
2990 } 2993 }
2991 } 2994 }
2992 2995
@@ -3152,7 +3155,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3152 /* lower 16 bits has Rx packet buffer allocation size in KB */ 3155 /* lower 16 bits has Rx packet buffer allocation size in KB */
3153 pba &= 0xffff; 3156 pba &= 0xffff;
3154 /* 3157 /*
3155 * the Tx fifo also stores 16 bytes of information about the tx 3158 * the Tx fifo also stores 16 bytes of information about the Tx
3156 * but don't include ethernet FCS because hardware appends it 3159 * but don't include ethernet FCS because hardware appends it
3157 */ 3160 */
3158 min_tx_space = (adapter->max_frame_size + 3161 min_tx_space = (adapter->max_frame_size +
@@ -3175,7 +3178,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3175 pba -= min_tx_space - tx_space; 3178 pba -= min_tx_space - tx_space;
3176 3179
3177 /* 3180 /*
3178 * if short on Rx space, Rx wins and must trump tx 3181 * if short on Rx space, Rx wins and must trump Tx
3179 * adjustment or use Early Receive if available 3182 * adjustment or use Early Receive if available
3180 */ 3183 */
3181 if ((pba < min_rx_space) && 3184 if ((pba < min_rx_space) &&
@@ -4039,11 +4042,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
4039 adapter->netdev->name, 4042 adapter->netdev->name,
4040 adapter->link_speed, 4043 adapter->link_speed,
4041 (adapter->link_duplex == FULL_DUPLEX) ? 4044 (adapter->link_duplex == FULL_DUPLEX) ?
4042 "Full Duplex" : "Half Duplex", 4045 "Full Duplex" : "Half Duplex",
4043 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 4046 ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
4044 "RX/TX" : 4047 "Rx/Tx" :
4045 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 4048 ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
4046 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); 4049 ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
4047} 4050}
4048 4051
4049static bool e1000e_has_link(struct e1000_adapter *adapter) 4052static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4306,7 +4309,6 @@ link_up:
4306 * to get done, so reset controller to flush Tx. 4309 * to get done, so reset controller to flush Tx.
4307 * (Do the reset outside of interrupt context). 4310 * (Do the reset outside of interrupt context).
4308 */ 4311 */
4309 adapter->tx_timeout_count++;
4310 schedule_work(&adapter->reset_task); 4312 schedule_work(&adapter->reset_task);
4311 /* return immediately since reset is imminent */ 4313 /* return immediately since reset is imminent */
4312 return; 4314 return;
@@ -4338,7 +4340,7 @@ link_up:
4338 /* Force detection of hung controller every watchdog period */ 4340 /* Force detection of hung controller every watchdog period */
4339 adapter->detect_tx_hung = 1; 4341 adapter->detect_tx_hung = 1;
4340 4342
4341 /* flush partial descriptors to memory before detecting tx hang */ 4343 /* flush partial descriptors to memory before detecting Tx hang */
4342 if (adapter->flags2 & FLAG2_DMA_BURST) { 4344 if (adapter->flags2 & FLAG2_DMA_BURST) {
4343 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 4345 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4344 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); 4346 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
@@ -4529,7 +4531,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
4529 buffer_info->next_to_watch = i; 4531 buffer_info->next_to_watch = i;
4530 buffer_info->dma = dma_map_single(&pdev->dev, 4532 buffer_info->dma = dma_map_single(&pdev->dev,
4531 skb->data + offset, 4533 skb->data + offset,
4532 size, DMA_TO_DEVICE); 4534 size, DMA_TO_DEVICE);
4533 buffer_info->mapped_as_page = false; 4535 buffer_info->mapped_as_page = false;
4534 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 4536 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4535 goto dma_error; 4537 goto dma_error;
@@ -4576,7 +4578,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
4576 } 4578 }
4577 } 4579 }
4578 4580
4579 segs = skb_shinfo(skb)->gso_segs ?: 1; 4581 segs = skb_shinfo(skb)->gso_segs ? : 1;
4580 /* multiply data chunks by size of headers */ 4582 /* multiply data chunks by size of headers */
4581 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 4583 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4582 4584
@@ -4588,13 +4590,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
4588 return count; 4590 return count;
4589 4591
4590dma_error: 4592dma_error:
4591 dev_err(&pdev->dev, "TX DMA map failed\n"); 4593 dev_err(&pdev->dev, "Tx DMA map failed\n");
4592 buffer_info->dma = 0; 4594 buffer_info->dma = 0;
4593 if (count) 4595 if (count)
4594 count--; 4596 count--;
4595 4597
4596 while (count--) { 4598 while (count--) {
4597 if (i==0) 4599 if (i == 0)
4598 i += tx_ring->count; 4600 i += tx_ring->count;
4599 i--; 4601 i--;
4600 buffer_info = &tx_ring->buffer_info[i]; 4602 buffer_info = &tx_ring->buffer_info[i];
@@ -6193,7 +6195,7 @@ static int __init e1000_init_module(void)
6193 int ret; 6195 int ret;
6194 pr_info("Intel(R) PRO/1000 Network Driver - %s\n", 6196 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6195 e1000e_driver_version); 6197 e1000e_driver_version);
6196 pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n"); 6198 pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
6197 ret = pci_register_driver(&e1000_driver); 6199 ret = pci_register_driver(&e1000_driver);
6198 6200
6199 return ret; 6201 return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a9612b0e4bca..4dd9b63273f6 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak,
62 module_param_array_named(X, X, int, &num_##X, 0); \ 62 module_param_array_named(X, X, int, &num_##X, 0); \
63 MODULE_PARM_DESC(X, desc); 63 MODULE_PARM_DESC(X, desc);
64 64
65
66/* 65/*
67 * Transmit Interrupt Delay in units of 1.024 microseconds 66 * Transmit Interrupt Delay in units of 1.024 microseconds
68 * Tx interrupt delay needs to typically be set to something non zero 67 * Tx interrupt delay needs to typically be set to something non-zero
69 * 68 *
70 * Valid Range: 0-65535 69 * Valid Range: 0-65535
71 */ 70 */
@@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
112#define DEFAULT_ITR 3 111#define DEFAULT_ITR 3
113#define MAX_ITR 100000 112#define MAX_ITR 100000
114#define MIN_ITR 100 113#define MIN_ITR 100
114
115/* IntMode (Interrupt Mode) 115/* IntMode (Interrupt Mode)
116 * 116 *
117 * Valid Range: 0 - 2 117 * Valid Range: 0 - 2
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 00f89e8a9fa0..6bea051b134b 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -640,7 +640,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
640 s32 ret_val; 640 s32 ret_val;
641 u16 phy_data; 641 u16 phy_data;
642 642
643 /* Enable CRS on TX. This must be set for half-duplex operation. */ 643 /* Enable CRS on Tx. This must be set for half-duplex operation. */
644 ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); 644 ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
645 if (ret_val) 645 if (ret_val)
646 goto out; 646 goto out;
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa9af7f..907b05a1c659 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
812 if (netif_msg_hw(priv)) 812 if (netif_msg_hw(priv))
813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", 813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
814 endptr + 1); 814 endptr + 1);
815 enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv); 815 enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
816} 816}
817 817
818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, 818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6de4675016b5..5ed8f9f9419f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -434,7 +434,6 @@ static void gfar_init_mac(struct net_device *ndev)
434static struct net_device_stats *gfar_get_stats(struct net_device *dev) 434static struct net_device_stats *gfar_get_stats(struct net_device *dev)
435{ 435{
436 struct gfar_private *priv = netdev_priv(dev); 436 struct gfar_private *priv = netdev_priv(dev);
437 struct netdev_queue *txq;
438 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 437 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
439 unsigned long tx_packets = 0, tx_bytes = 0; 438 unsigned long tx_packets = 0, tx_bytes = 0;
440 int i = 0; 439 int i = 0;
@@ -450,9 +449,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
450 dev->stats.rx_dropped = rx_dropped; 449 dev->stats.rx_dropped = rx_dropped;
451 450
452 for (i = 0; i < priv->num_tx_queues; i++) { 451 for (i = 0; i < priv->num_tx_queues; i++) {
453 txq = netdev_get_tx_queue(dev, i); 452 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
454 tx_bytes += txq->tx_bytes; 453 tx_packets += priv->tx_queue[i]->stats.tx_packets;
455 tx_packets += txq->tx_packets;
456 } 454 }
457 455
458 dev->stats.tx_bytes = tx_bytes; 456 dev->stats.tx_bytes = tx_bytes;
@@ -1922,7 +1920,7 @@ int startup_gfar(struct net_device *ndev)
1922 if (err) { 1920 if (err) {
1923 for (j = 0; j < i; j++) 1921 for (j = 0; j < i; j++)
1924 free_grp_irqs(&priv->gfargrp[j]); 1922 free_grp_irqs(&priv->gfargrp[j]);
1925 goto irq_fail; 1923 goto irq_fail;
1926 } 1924 }
1927 } 1925 }
1928 1926
@@ -2109,8 +2107,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2109 } 2107 }
2110 2108
2111 /* Update transmit stats */ 2109 /* Update transmit stats */
2112 txq->tx_bytes += skb->len; 2110 tx_queue->stats.tx_bytes += skb->len;
2113 txq->tx_packets ++; 2111 tx_queue->stats.tx_packets++;
2114 2112
2115 txbdp = txbdp_start = tx_queue->cur_tx; 2113 txbdp = txbdp_start = tx_queue->cur_tx;
2116 lstatus = txbdp->lstatus; 2114 lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb88ae0..54de4135e932 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@ enum {
907 MQ_MG_MODE 907 MQ_MG_MODE
908}; 908};
909 909
910/*
911 * Per TX queue stats
912 */
913struct tx_q_stats {
914 unsigned long tx_packets;
915 unsigned long tx_bytes;
916};
917
910/** 918/**
911 * struct gfar_priv_tx_q - per tx queue structure 919 * struct gfar_priv_tx_q - per tx queue structure
912 * @txlock: per queue tx spin lock 920 * @txlock: per queue tx spin lock
913 * @tx_skbuff:skb pointers 921 * @tx_skbuff:skb pointers
914 * @skb_curtx: to be used skb pointer 922 * @skb_curtx: to be used skb pointer
915 * @skb_dirtytx:the last used skb pointer 923 * @skb_dirtytx:the last used skb pointer
924 * @stats: bytes/packets stats
916 * @qindex: index of this queue 925 * @qindex: index of this queue
917 * @dev: back pointer to the dev structure 926 * @dev: back pointer to the dev structure
918 * @grp: back pointer to the group to which this queue belongs 927 * @grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q {
934 struct txbd8 *tx_bd_base; 943 struct txbd8 *tx_bd_base;
935 struct txbd8 *cur_tx; 944 struct txbd8 *cur_tx;
936 struct txbd8 *dirty_tx; 945 struct txbd8 *dirty_tx;
946 struct tx_q_stats stats;
937 struct net_device *dev; 947 struct net_device *dev;
938 struct gfar_priv_grp *grp; 948 struct gfar_priv_grp *grp;
939 u16 skb_curtx; 949 u16 skb_curtx;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960ce09e..fdb0333f5cb6 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. 2 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
3 * 3 *
4 * 2005-2009 (c) Aeroflex Gaisler AB 4 * 2005-2010 (c) Aeroflex Gaisler AB
5 * 5 *
6 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs 6 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
7 * available in the GRLIB VHDL IP core library. 7 * available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev)
356 dev_dbg(&dev->dev, " starting queue\n"); 356 dev_dbg(&dev->dev, " starting queue\n");
357 netif_start_queue(dev); 357 netif_start_queue(dev);
358 358
359 GRETH_REGSAVE(greth->regs->status, 0xFF);
360
359 napi_enable(&greth->napi); 361 napi_enable(&greth->napi);
360 362
361 greth_enable_irqs(greth); 363 greth_enable_irqs(greth);
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev)
371 373
372 napi_disable(&greth->napi); 374 napi_disable(&greth->napi);
373 375
376 greth_disable_irqs(greth);
374 greth_disable_tx(greth); 377 greth_disable_tx(greth);
378 greth_disable_rx(greth);
375 379
376 netif_stop_queue(dev); 380 netif_stop_queue(dev);
377 381
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
388 struct greth_private *greth = netdev_priv(dev); 392 struct greth_private *greth = netdev_priv(dev);
389 struct greth_bd *bdp; 393 struct greth_bd *bdp;
390 int err = NETDEV_TX_OK; 394 int err = NETDEV_TX_OK;
391 u32 status, dma_addr; 395 u32 status, dma_addr, ctrl;
396 unsigned long flags;
392 397
393 bdp = greth->tx_bd_base + greth->tx_next; 398 /* Clean TX Ring */
399 greth_clean_tx(greth->netdev);
394 400
395 if (unlikely(greth->tx_free <= 0)) { 401 if (unlikely(greth->tx_free <= 0)) {
402 spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
403 ctrl = GRETH_REGLOAD(greth->regs->control);
404 /* Enable TX IRQ only if not already in poll() routine */
405 if (ctrl & GRETH_RXI)
406 GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
396 netif_stop_queue(dev); 407 netif_stop_queue(dev);
408 spin_unlock_irqrestore(&greth->devlock, flags);
397 return NETDEV_TX_BUSY; 409 return NETDEV_TX_BUSY;
398 } 410 }
399 411
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
406 goto out; 418 goto out;
407 } 419 }
408 420
421 bdp = greth->tx_bd_base + greth->tx_next;
409 dma_addr = greth_read_bd(&bdp->addr); 422 dma_addr = greth_read_bd(&bdp->addr);
410 423
411 memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); 424 memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
412 425
413 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); 426 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
414 427
415 status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN); 428 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
416 429
417 /* Wrap around descriptor ring */ 430 /* Wrap around descriptor ring */
418 if (greth->tx_next == GRETH_TXBD_NUM_MASK) { 431 if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
422 greth->tx_next = NEXT_TX(greth->tx_next); 435 greth->tx_next = NEXT_TX(greth->tx_next);
423 greth->tx_free--; 436 greth->tx_free--;
424 437
425 /* No more descriptors */
426 if (unlikely(greth->tx_free == 0)) {
427
428 /* Free transmitted descriptors */
429 greth_clean_tx(dev);
430
431 /* If nothing was cleaned, stop queue & wait for irq */
432 if (unlikely(greth->tx_free == 0)) {
433 status |= GRETH_BD_IE;
434 netif_stop_queue(dev);
435 }
436 }
437
438 /* Write descriptor control word and enable transmission */ 438 /* Write descriptor control word and enable transmission */
439 greth_write_bd(&bdp->stat, status); 439 greth_write_bd(&bdp->stat, status);
440 spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
440 greth_enable_tx(greth); 441 greth_enable_tx(greth);
442 spin_unlock_irqrestore(&greth->devlock, flags);
441 443
442out: 444out:
443 dev_kfree_skb(skb); 445 dev_kfree_skb(skb);
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
450{ 452{
451 struct greth_private *greth = netdev_priv(dev); 453 struct greth_private *greth = netdev_priv(dev);
452 struct greth_bd *bdp; 454 struct greth_bd *bdp;
453 u32 status = 0, dma_addr; 455 u32 status = 0, dma_addr, ctrl;
454 int curr_tx, nr_frags, i, err = NETDEV_TX_OK; 456 int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
457 unsigned long flags;
455 458
456 nr_frags = skb_shinfo(skb)->nr_frags; 459 nr_frags = skb_shinfo(skb)->nr_frags;
457 460
461 /* Clean TX Ring */
462 greth_clean_tx_gbit(dev);
463
458 if (greth->tx_free < nr_frags + 1) { 464 if (greth->tx_free < nr_frags + 1) {
465 spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
466 ctrl = GRETH_REGLOAD(greth->regs->control);
467 /* Enable TX IRQ only if not already in poll() routine */
468 if (ctrl & GRETH_RXI)
469 GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
459 netif_stop_queue(dev); 470 netif_stop_queue(dev);
471 spin_unlock_irqrestore(&greth->devlock, flags);
460 err = NETDEV_TX_BUSY; 472 err = NETDEV_TX_BUSY;
461 goto out; 473 goto out;
462 } 474 }
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
499 greth->tx_skbuff[curr_tx] = NULL; 511 greth->tx_skbuff[curr_tx] = NULL;
500 bdp = greth->tx_bd_base + curr_tx; 512 bdp = greth->tx_bd_base + curr_tx;
501 513
502 status = GRETH_TXBD_CSALL; 514 status = GRETH_TXBD_CSALL | GRETH_BD_EN;
503 status |= frag->size & GRETH_BD_LEN; 515 status |= frag->size & GRETH_BD_LEN;
504 516
505 /* Wrap around descriptor ring */ 517 /* Wrap around descriptor ring */
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
509 /* More fragments left */ 521 /* More fragments left */
510 if (i < nr_frags - 1) 522 if (i < nr_frags - 1)
511 status |= GRETH_TXBD_MORE; 523 status |= GRETH_TXBD_MORE;
512 524 else
513 /* ... last fragment, check if out of descriptors */ 525 status |= GRETH_BD_IE; /* enable IRQ on last fragment */
514 else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
515
516 /* Enable interrupts and stop queue */
517 status |= GRETH_BD_IE;
518 netif_stop_queue(dev);
519 }
520 526
521 greth_write_bd(&bdp->stat, status); 527 greth_write_bd(&bdp->stat, status);
522 528
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
536 542
537 wmb(); 543 wmb();
538 544
539 /* Enable the descriptors that we configured ... */ 545 /* Enable the descriptor chain by enabling the first descriptor */
540 for (i = 0; i < nr_frags + 1; i++) { 546 bdp = greth->tx_bd_base + greth->tx_next;
541 bdp = greth->tx_bd_base + greth->tx_next; 547 greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
542 greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); 548 greth->tx_next = curr_tx;
543 greth->tx_next = NEXT_TX(greth->tx_next); 549 greth->tx_free -= nr_frags + 1;
544 greth->tx_free--;
545 }
546 550
551 wmb();
552
553 spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
547 greth_enable_tx(greth); 554 greth_enable_tx(greth);
555 spin_unlock_irqrestore(&greth->devlock, flags);
548 556
549 return NETDEV_TX_OK; 557 return NETDEV_TX_OK;
550 558
551frag_map_error: 559frag_map_error:
552 /* Unmap SKB mappings that succeeded */ 560 /* Unmap SKB mappings that succeeded and disable descriptor */
553 for (i = 0; greth->tx_next + i != curr_tx; i++) { 561 for (i = 0; greth->tx_next + i != curr_tx; i++) {
554 bdp = greth->tx_bd_base + greth->tx_next + i; 562 bdp = greth->tx_bd_base + greth->tx_next + i;
555 dma_unmap_single(greth->dev, 563 dma_unmap_single(greth->dev,
556 greth_read_bd(&bdp->addr), 564 greth_read_bd(&bdp->addr),
557 greth_read_bd(&bdp->stat) & GRETH_BD_LEN, 565 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
558 DMA_TO_DEVICE); 566 DMA_TO_DEVICE);
567 greth_write_bd(&bdp->stat, 0);
559 } 568 }
560map_error: 569map_error:
561 if (net_ratelimit()) 570 if (net_ratelimit())
@@ -565,12 +574,11 @@ out:
565 return err; 574 return err;
566} 575}
567 576
568
569static irqreturn_t greth_interrupt(int irq, void *dev_id) 577static irqreturn_t greth_interrupt(int irq, void *dev_id)
570{ 578{
571 struct net_device *dev = dev_id; 579 struct net_device *dev = dev_id;
572 struct greth_private *greth; 580 struct greth_private *greth;
573 u32 status; 581 u32 status, ctrl;
574 irqreturn_t retval = IRQ_NONE; 582 irqreturn_t retval = IRQ_NONE;
575 583
576 greth = netdev_priv(dev); 584 greth = netdev_priv(dev);
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
580 /* Get the interrupt events that caused us to be here. */ 588 /* Get the interrupt events that caused us to be here. */
581 status = GRETH_REGLOAD(greth->regs->status); 589 status = GRETH_REGLOAD(greth->regs->status);
582 590
583 /* Handle rx and tx interrupts through poll */ 591 /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
584 if (status & (GRETH_INT_RX | GRETH_INT_TX)) { 592 * set regardless of whether IRQ is enabled or not. Especially
585 593 * important when shared IRQ.
586 /* Clear interrupt status */ 594 */
587 GRETH_REGORIN(greth->regs->status, 595 ctrl = GRETH_REGLOAD(greth->regs->control);
588 status & (GRETH_INT_RX | GRETH_INT_TX));
589 596
597 /* Handle rx and tx interrupts through poll */
598 if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
599 ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
590 retval = IRQ_HANDLED; 600 retval = IRQ_HANDLED;
591 601
592 /* Disable interrupts and schedule poll() */ 602 /* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev)
610 620
611 while (1) { 621 while (1) {
612 bdp = greth->tx_bd_base + greth->tx_last; 622 bdp = greth->tx_bd_base + greth->tx_last;
623 GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
624 mb();
613 stat = greth_read_bd(&bdp->stat); 625 stat = greth_read_bd(&bdp->stat);
614 626
615 if (unlikely(stat & GRETH_BD_EN)) 627 if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev)
670 682
671 /* We only clean fully completed SKBs */ 683 /* We only clean fully completed SKBs */
672 bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); 684 bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
673 stat = bdp_last_frag->stat; 685
686 GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
687 mb();
688 stat = greth_read_bd(&bdp_last_frag->stat);
674 689
675 if (stat & GRETH_BD_EN) 690 if (stat & GRETH_BD_EN)
676 break; 691 break;
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev)
702 greth->tx_free += nr_frags+1; 717 greth->tx_free += nr_frags+1;
703 dev_kfree_skb(skb); 718 dev_kfree_skb(skb);
704 } 719 }
705 if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
706 netif_wake_queue(dev);
707 }
708}
709 720
710static int greth_pending_packets(struct greth_private *greth) 721 if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
711{ 722 netif_wake_queue(dev);
712 struct greth_bd *bdp;
713 u32 status;
714 bdp = greth->rx_bd_base + greth->rx_cur;
715 status = greth_read_bd(&bdp->stat);
716 if (status & GRETH_BD_EN)
717 return 0;
718 else
719 return 1;
720} 723}
721 724
722static int greth_rx(struct net_device *dev, int limit) 725static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit)
727 int pkt_len; 730 int pkt_len;
728 int bad, count; 731 int bad, count;
729 u32 status, dma_addr; 732 u32 status, dma_addr;
733 unsigned long flags;
730 734
731 greth = netdev_priv(dev); 735 greth = netdev_priv(dev);
732 736
733 for (count = 0; count < limit; ++count) { 737 for (count = 0; count < limit; ++count) {
734 738
735 bdp = greth->rx_bd_base + greth->rx_cur; 739 bdp = greth->rx_bd_base + greth->rx_cur;
740 GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
741 mb();
736 status = greth_read_bd(&bdp->stat); 742 status = greth_read_bd(&bdp->stat);
737 dma_addr = greth_read_bd(&bdp->addr);
738 bad = 0;
739 743
740 if (unlikely(status & GRETH_BD_EN)) { 744 if (unlikely(status & GRETH_BD_EN)) {
741 break; 745 break;
742 } 746 }
743 747
748 dma_addr = greth_read_bd(&bdp->addr);
749 bad = 0;
750
744 /* Check status for errors. */ 751 /* Check status for errors. */
745 if (unlikely(status & GRETH_RXBD_STATUS)) { 752 if (unlikely(status & GRETH_RXBD_STATUS)) {
746 if (status & GRETH_RXBD_ERR_FT) { 753 if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit)
802 809
803 dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); 810 dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
804 811
812 spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
805 greth_enable_rx(greth); 813 greth_enable_rx(greth);
814 spin_unlock_irqrestore(&greth->devlock, flags);
806 815
807 greth->rx_cur = NEXT_RX(greth->rx_cur); 816 greth->rx_cur = NEXT_RX(greth->rx_cur);
808 } 817 }
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
836 int pkt_len; 845 int pkt_len;
837 int bad, count = 0; 846 int bad, count = 0;
838 u32 status, dma_addr; 847 u32 status, dma_addr;
848 unsigned long flags;
839 849
840 greth = netdev_priv(dev); 850 greth = netdev_priv(dev);
841 851
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
843 853
844 bdp = greth->rx_bd_base + greth->rx_cur; 854 bdp = greth->rx_bd_base + greth->rx_cur;
845 skb = greth->rx_skbuff[greth->rx_cur]; 855 skb = greth->rx_skbuff[greth->rx_cur];
856 GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
857 mb();
846 status = greth_read_bd(&bdp->stat); 858 status = greth_read_bd(&bdp->stat);
847 bad = 0; 859 bad = 0;
848 860
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
865 } 877 }
866 } 878 }
867 879
868 /* Allocate new skb to replace current */ 880 /* Allocate new skb to replace current, not needed if the
869 newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN); 881 * current skb can be reused */
870 882 if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
871 if (!bad && newskb) {
872 skb_reserve(newskb, NET_IP_ALIGN); 883 skb_reserve(newskb, NET_IP_ALIGN);
873 884
874 dma_addr = dma_map_single(greth->dev, 885 dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
905 if (net_ratelimit()) 916 if (net_ratelimit())
906 dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); 917 dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
907 dev_kfree_skb(newskb); 918 dev_kfree_skb(newskb);
919 /* reusing current skb, so it is a drop */
908 dev->stats.rx_dropped++; 920 dev->stats.rx_dropped++;
909 } 921 }
922 } else if (bad) {
923 /* Bad Frame transfer, the skb is reused */
924 dev->stats.rx_dropped++;
910 } else { 925 } else {
926 /* Failed Allocating a new skb. This is rather stupid
927 * but the current "filled" skb is reused, as if
928 * transfer failure. One could argue that RX descriptor
929 * table handling should be divided into cleaning and
930 * filling as the TX part of the driver
931 */
911 if (net_ratelimit()) 932 if (net_ratelimit())
912 dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); 933 dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
934 /* reusing current skb, so it is a drop */
913 dev->stats.rx_dropped++; 935 dev->stats.rx_dropped++;
914 } 936 }
915 937
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
920 942
921 wmb(); 943 wmb();
922 greth_write_bd(&bdp->stat, status); 944 greth_write_bd(&bdp->stat, status);
945 spin_lock_irqsave(&greth->devlock, flags);
923 greth_enable_rx(greth); 946 greth_enable_rx(greth);
947 spin_unlock_irqrestore(&greth->devlock, flags);
924 greth->rx_cur = NEXT_RX(greth->rx_cur); 948 greth->rx_cur = NEXT_RX(greth->rx_cur);
925 } 949 }
926 950
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget)
932{ 956{
933 struct greth_private *greth; 957 struct greth_private *greth;
934 int work_done = 0; 958 int work_done = 0;
959 unsigned long flags;
960 u32 mask, ctrl;
935 greth = container_of(napi, struct greth_private, napi); 961 greth = container_of(napi, struct greth_private, napi);
936 962
937 if (greth->gbit_mac) { 963restart_txrx_poll:
938 greth_clean_tx_gbit(greth->netdev); 964 if (netif_queue_stopped(greth->netdev)) {
939 } else { 965 if (greth->gbit_mac)
940 greth_clean_tx(greth->netdev); 966 greth_clean_tx_gbit(greth->netdev);
967 else
968 greth_clean_tx(greth->netdev);
941 } 969 }
942 970
943restart_poll:
944 if (greth->gbit_mac) { 971 if (greth->gbit_mac) {
945 work_done += greth_rx_gbit(greth->netdev, budget - work_done); 972 work_done += greth_rx_gbit(greth->netdev, budget - work_done);
946 } else { 973 } else {
@@ -949,15 +976,29 @@ restart_poll:
949 976
950 if (work_done < budget) { 977 if (work_done < budget) {
951 978
952 napi_complete(napi); 979 spin_lock_irqsave(&greth->devlock, flags);
980
981 ctrl = GRETH_REGLOAD(greth->regs->control);
982 if (netif_queue_stopped(greth->netdev)) {
983 GRETH_REGSAVE(greth->regs->control,
984 ctrl | GRETH_TXI | GRETH_RXI);
985 mask = GRETH_INT_RX | GRETH_INT_RE |
986 GRETH_INT_TX | GRETH_INT_TE;
987 } else {
988 GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
989 mask = GRETH_INT_RX | GRETH_INT_RE;
990 }
953 991
954 if (greth_pending_packets(greth)) { 992 if (GRETH_REGLOAD(greth->regs->status) & mask) {
955 napi_reschedule(napi); 993 GRETH_REGSAVE(greth->regs->control, ctrl);
956 goto restart_poll; 994 spin_unlock_irqrestore(&greth->devlock, flags);
995 goto restart_txrx_poll;
996 } else {
997 __napi_complete(napi);
998 spin_unlock_irqrestore(&greth->devlock, flags);
957 } 999 }
958 } 1000 }
959 1001
960 greth_enable_irqs(greth);
961 return work_done; 1002 return work_done;
962} 1003}
963 1004
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = {
1152}; 1193};
1153 1194
1154static struct net_device_ops greth_netdev_ops = { 1195static struct net_device_ops greth_netdev_ops = {
1155 .ndo_open = greth_open, 1196 .ndo_open = greth_open,
1156 .ndo_stop = greth_close, 1197 .ndo_stop = greth_close,
1157 .ndo_start_xmit = greth_start_xmit, 1198 .ndo_start_xmit = greth_start_xmit,
1158 .ndo_set_mac_address = greth_set_mac_add, 1199 .ndo_set_mac_address = greth_set_mac_add,
1159 .ndo_validate_addr = eth_validate_addr, 1200 .ndo_validate_addr = eth_validate_addr,
1160}; 1201};
1161 1202
1162static inline int wait_for_mdio(struct greth_private *greth) 1203static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev)
1217 struct greth_private *greth = netdev_priv(dev); 1258 struct greth_private *greth = netdev_priv(dev);
1218 struct phy_device *phydev = greth->phy; 1259 struct phy_device *phydev = greth->phy;
1219 unsigned long flags; 1260 unsigned long flags;
1220
1221 int status_change = 0; 1261 int status_change = 0;
1262 u32 ctrl;
1222 1263
1223 spin_lock_irqsave(&greth->devlock, flags); 1264 spin_lock_irqsave(&greth->devlock, flags);
1224 1265
1225 if (phydev->link) { 1266 if (phydev->link) {
1226 1267
1227 if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { 1268 if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
1228 1269 ctrl = GRETH_REGLOAD(greth->regs->control) &
1229 GRETH_REGANDIN(greth->regs->control, 1270 ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
1230 ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
1231 1271
1232 if (phydev->duplex) 1272 if (phydev->duplex)
1233 GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD); 1273 ctrl |= GRETH_CTRL_FD;
1234
1235 if (phydev->speed == SPEED_100) {
1236
1237 GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
1238 }
1239 1274
1275 if (phydev->speed == SPEED_100)
1276 ctrl |= GRETH_CTRL_SP;
1240 else if (phydev->speed == SPEED_1000) 1277 else if (phydev->speed == SPEED_1000)
1241 GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB); 1278 ctrl |= GRETH_CTRL_GB;
1242 1279
1280 GRETH_REGSAVE(greth->regs->control, ctrl);
1243 greth->speed = phydev->speed; 1281 greth->speed = phydev->speed;
1244 greth->duplex = phydev->duplex; 1282 greth->duplex = phydev->duplex;
1245 status_change = 1; 1283 status_change = 1;
@@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = {
1600 { 1638 {
1601 .name = "GAISLER_ETHMAC", 1639 .name = "GAISLER_ETHMAC",
1602 }, 1640 },
1641 {
1642 .name = "01_01d",
1643 },
1603 {}, 1644 {},
1604}; 1645};
1605 1646
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 03ad903cd676..be0f2062bd14 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -23,6 +23,7 @@
23#define GRETH_BD_LEN 0x7FF 23#define GRETH_BD_LEN 0x7FF
24 24
25#define GRETH_TXEN 0x1 25#define GRETH_TXEN 0x1
26#define GRETH_INT_TE 0x2
26#define GRETH_INT_TX 0x8 27#define GRETH_INT_TX 0x8
27#define GRETH_TXI 0x4 28#define GRETH_TXI 0x4
28#define GRETH_TXBD_STATUS 0x0001C000 29#define GRETH_TXBD_STATUS 0x0001C000
@@ -35,6 +36,7 @@
35#define GRETH_TXBD_ERR_UE 0x4000 36#define GRETH_TXBD_ERR_UE 0x4000
36#define GRETH_TXBD_ERR_AL 0x8000 37#define GRETH_TXBD_ERR_AL 0x8000
37 38
39#define GRETH_INT_RE 0x1
38#define GRETH_INT_RX 0x4 40#define GRETH_INT_RX 0x4
39#define GRETH_RXEN 0x2 41#define GRETH_RXEN 0x2
40#define GRETH_RXI 0x8 42#define GRETH_RXI 0x8
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f54281d..4488bd581eca 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
635 635
636 ret = sh_irda_set_baudrate(self, speed); 636 ret = sh_irda_set_baudrate(self, speed);
637 if (ret < 0) 637 if (ret < 0)
638 return ret; 638 goto sh_irda_hard_xmit_end;
639 639
640 self->tx_buff.len = 0; 640 self->tx_buff.len = 0;
641 if (skb->len) { 641 if (skb->len) {
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
652 652
653 sh_irda_write(self, IRTFLR, self->tx_buff.len); 653 sh_irda_write(self, IRTFLR, self->tx_buff.len);
654 sh_irda_write(self, IRTCTR, ARMOD | TE); 654 sh_irda_write(self, IRTCTR, ARMOD | TE);
655 } 655 } else
656 goto sh_irda_hard_xmit_end;
656 657
657 dev_kfree_skb(skb); 658 dev_kfree_skb(skb);
658 659
659 return 0; 660 return 0;
661
662sh_irda_hard_xmit_end:
663 sh_irda_set_baudrate(self, 9600);
664 netif_wake_queue(self->ndev);
665 sh_irda_rcv_ctrl(self, 1);
666 dev_kfree_skb(skb);
667
668 return ret;
669
660} 670}
661 671
662static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) 672static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2df3e42..ebbda7d15254 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); 1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1371 1371
1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1373
1374 /* clear VMDq pool/queue selection for RAR 0 */
1375 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1373 } 1376 }
1374 hw->addr_ctrl.overflow_promisc = 0; 1377 hw->addr_ctrl.overflow_promisc = 0;
1375 1378
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d4859790..8753980668c7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -165,7 +165,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
165 unsigned int thisoff = 0; 165 unsigned int thisoff = 0;
166 unsigned int thislen = 0; 166 unsigned int thislen = 0;
167 u32 fcbuff, fcdmarw, fcfltrw; 167 u32 fcbuff, fcdmarw, fcfltrw;
168 dma_addr_t addr; 168 dma_addr_t addr = 0;
169 169
170 if (!netdev || !sgl) 170 if (!netdev || !sgl)
171 return 0; 171 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a060610a42db..fbae703b46d7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "3.0.12-k2" 55#define DRV_VERSION "3.2.9-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58 58
@@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3176 u32 mhadd, hlreg0; 3176 u32 mhadd, hlreg0;
3177 3177
3178 /* Decide whether to use packet split mode or not */ 3178 /* Decide whether to use packet split mode or not */
3179 /* On by default */
3180 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3181
3179 /* Do not use packet split if we're in SR-IOV Mode */ 3182 /* Do not use packet split if we're in SR-IOV Mode */
3180 if (!adapter->num_vfs) 3183 if (adapter->num_vfs)
3181 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 3184 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3185
3186 /* Disable packet split due to 82599 erratum #45 */
3187 if (hw->mac.type == ixgbe_mac_82599EB)
3188 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3182 3189
3183 /* Set the RX buffer length according to the mode */ 3190 /* Set the RX buffer length according to the mode */
3184 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 3191 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -4863,16 +4870,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4863{ 4870{
4864 int q_idx, num_q_vectors; 4871 int q_idx, num_q_vectors;
4865 struct ixgbe_q_vector *q_vector; 4872 struct ixgbe_q_vector *q_vector;
4866 int napi_vectors;
4867 int (*poll)(struct napi_struct *, int); 4873 int (*poll)(struct napi_struct *, int);
4868 4874
4869 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 4875 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4870 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4876 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4871 napi_vectors = adapter->num_rx_queues;
4872 poll = &ixgbe_clean_rxtx_many; 4877 poll = &ixgbe_clean_rxtx_many;
4873 } else { 4878 } else {
4874 num_q_vectors = 1; 4879 num_q_vectors = 1;
4875 napi_vectors = 1;
4876 poll = &ixgbe_poll; 4880 poll = &ixgbe_poll;
4877 } 4881 }
4878 4882
@@ -6667,8 +6671,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6667 struct ixgbe_adapter *adapter, 6671 struct ixgbe_adapter *adapter,
6668 struct ixgbe_ring *tx_ring) 6672 struct ixgbe_ring *tx_ring)
6669{ 6673{
6670 struct net_device *netdev = tx_ring->netdev;
6671 struct netdev_queue *txq;
6672 unsigned int first; 6674 unsigned int first;
6673 unsigned int tx_flags = 0; 6675 unsigned int tx_flags = 0;
6674 u8 hdr_len = 0; 6676 u8 hdr_len = 0;
@@ -6765,9 +6767,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6765 /* add the ATR filter if ATR is on */ 6767 /* add the ATR filter if ATR is on */
6766 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) 6768 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6767 ixgbe_atr(tx_ring, skb, tx_flags, protocol); 6769 ixgbe_atr(tx_ring, skb, tx_flags, protocol);
6768 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6769 txq->tx_bytes += skb->len;
6770 txq->tx_packets++;
6771 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); 6770 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
6772 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); 6771 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6773 6772
@@ -6925,8 +6924,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6925 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6924 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6926 int i; 6925 int i;
6927 6926
6928 /* accurate rx/tx bytes/packets stats */
6929 dev_txq_stats_fold(netdev, stats);
6930 rcu_read_lock(); 6927 rcu_read_lock();
6931 for (i = 0; i < adapter->num_rx_queues; i++) { 6928 for (i = 0; i < adapter->num_rx_queues; i++) {
6932 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); 6929 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6943,6 +6940,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6943 stats->rx_bytes += bytes; 6940 stats->rx_bytes += bytes;
6944 } 6941 }
6945 } 6942 }
6943
6944 for (i = 0; i < adapter->num_tx_queues; i++) {
6945 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
6946 u64 bytes, packets;
6947 unsigned int start;
6948
6949 if (ring) {
6950 do {
6951 start = u64_stats_fetch_begin_bh(&ring->syncp);
6952 packets = ring->stats.packets;
6953 bytes = ring->stats.bytes;
6954 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6955 stats->tx_packets += packets;
6956 stats->tx_bytes += bytes;
6957 }
6958 }
6946 rcu_read_unlock(); 6959 rcu_read_unlock();
6947 /* following stats updated by ixgbe_watchdog_task() */ 6960 /* following stats updated by ixgbe_watchdog_task() */
6948 stats->multicast = netdev->stats.multicast; 6961 stats->multicast = netdev->stats.multicast;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b15738b009..187b3a16ec1f 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
111} 111}
112 112
113
114static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 113static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
115{ 114{
116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 115 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
117 vmolr |= (IXGBE_VMOLR_ROMPE | 116 vmolr |= (IXGBE_VMOLR_ROMPE |
118 IXGBE_VMOLR_ROPE |
119 IXGBE_VMOLR_BAM); 117 IXGBE_VMOLR_BAM);
120 if (aupe) 118 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE; 119 vmolr |= IXGBE_VMOLR_AUPE;
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a8923993ce3..f2518b01067d 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
133 } 133 }
134 134
135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); 136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
137 IXGBE_WRITE_FLUSH(hw); 137 IXGBE_WRITE_FLUSH(hw);
138 138
139 /* Poll for reset bit to self-clear indicating reset is complete */ 139 /* Poll for reset bit to self-clear indicating reset is complete */
140 for (i = 0; i < 10; i++) { 140 for (i = 0; i < 10; i++) {
141 udelay(1); 141 udelay(1);
142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
143 if (!(ctrl & IXGBE_CTRL_RST)) 143 if (!(ctrl & reset_bit))
144 break; 144 break;
145 } 145 }
146 if (ctrl & IXGBE_CTRL_RST) { 146 if (ctrl & reset_bit) {
147 status = IXGBE_ERR_RESET_FAILED; 147 status = IXGBE_ERR_RESET_FAILED;
148 hw_dbg(hw, "Reset polling failed to complete.\n"); 148 hw_dbg(hw, "Reset polling failed to complete.\n");
149 } 149 }
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845affea13..5933621ac3ff 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -585,7 +585,7 @@ err:
585 rcu_read_lock_bh(); 585 rcu_read_lock_bh();
586 vlan = rcu_dereference(q->vlan); 586 vlan = rcu_dereference(q->vlan);
587 if (vlan) 587 if (vlan)
588 netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++; 588 vlan->dev->stats.tx_dropped++;
589 rcu_read_unlock_bh(); 589 rcu_read_unlock_bh();
590 590
591 return err; 591 return err;
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 68aaa42d0ced..32f947154c33 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -113,7 +113,7 @@ static void catas_reset(struct work_struct *work)
113void mlx4_start_catas_poll(struct mlx4_dev *dev) 113void mlx4_start_catas_poll(struct mlx4_dev *dev)
114{ 114{
115 struct mlx4_priv *priv = mlx4_priv(dev); 115 struct mlx4_priv *priv = mlx4_priv(dev);
116 unsigned long addr; 116 phys_addr_t addr;
117 117
118 INIT_LIST_HEAD(&priv->catas_err.list); 118 INIT_LIST_HEAD(&priv->catas_err.list);
119 init_timer(&priv->catas_err.timer); 119 init_timer(&priv->catas_err.timer);
@@ -124,8 +124,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
124 124
125 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); 125 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
126 if (!priv->catas_err.map) { 126 if (!priv->catas_err.map) {
127 mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n", 127 mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
128 addr); 128 (unsigned long long) addr);
129 return; 129 return;
130 } 130 }
131 131
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index f6e0d40cd876..1ff6ca6466ed 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -202,7 +202,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
202 if (mlx4_uar_alloc(dev, &mdev->priv_uar)) 202 if (mlx4_uar_alloc(dev, &mdev->priv_uar))
203 goto err_pd; 203 goto err_pd;
204 204
205 mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 205 mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
206 PAGE_SIZE);
206 if (!mdev->uar_map) 207 if (!mdev->uar_map)
207 goto err_uar; 208 goto err_uar;
208 spin_lock_init(&mdev->uar_lock); 209 spin_lock_init(&mdev->uar_lock);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 782f11d8fa71..2765a3ce9c24 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -829,7 +829,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
829 goto err_uar_table_free; 829 goto err_uar_table_free;
830 } 830 }
831 831
832 priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 832 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
833 if (!priv->kar) { 833 if (!priv->kar) {
834 mlx4_err(dev, "Couldn't map kernel access region, " 834 mlx4_err(dev, "Couldn't map kernel access region, "
835 "aborting.\n"); 835 "aborting.\n");
@@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1289 { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
1290 { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
1291 { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
1292 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
1293 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
1294 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
1295 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
1296 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
1297 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
1298 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
1299 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
1300 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
1301 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
1302 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
1303 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
1289 { 0, } 1304 { 0, }
1290}; 1305};
1291 1306
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c4f88b7ef7b6..79cf42db2ea9 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -95,7 +95,8 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
95 * entry in hash chain and *mgm holds end of hash chain. 95 * entry in hash chain and *mgm holds end of hash chain.
96 */ 96 */
97static int find_mgm(struct mlx4_dev *dev, 97static int find_mgm(struct mlx4_dev *dev,
98 u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox, 98 u8 *gid, enum mlx4_protocol protocol,
99 struct mlx4_cmd_mailbox *mgm_mailbox,
99 u16 *hash, int *prev, int *index) 100 u16 *hash, int *prev, int *index)
100{ 101{
101 struct mlx4_cmd_mailbox *mailbox; 102 struct mlx4_cmd_mailbox *mailbox;
@@ -134,7 +135,8 @@ static int find_mgm(struct mlx4_dev *dev,
134 return err; 135 return err;
135 } 136 }
136 137
137 if (!memcmp(mgm->gid, gid, 16)) 138 if (!memcmp(mgm->gid, gid, 16) &&
139 be32_to_cpu(mgm->members_count) >> 30 == protocol)
138 return err; 140 return err;
139 141
140 *prev = *index; 142 *prev = *index;
@@ -146,7 +148,7 @@ static int find_mgm(struct mlx4_dev *dev,
146} 148}
147 149
148int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 150int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
149 int block_mcast_loopback) 151 int block_mcast_loopback, enum mlx4_protocol protocol)
150{ 152{
151 struct mlx4_priv *priv = mlx4_priv(dev); 153 struct mlx4_priv *priv = mlx4_priv(dev);
152 struct mlx4_cmd_mailbox *mailbox; 154 struct mlx4_cmd_mailbox *mailbox;
@@ -165,7 +167,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
165 167
166 mutex_lock(&priv->mcg_table.mutex); 168 mutex_lock(&priv->mcg_table.mutex);
167 169
168 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); 170 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
169 if (err) 171 if (err)
170 goto out; 172 goto out;
171 173
@@ -187,7 +189,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
187 memcpy(mgm->gid, gid, 16); 189 memcpy(mgm->gid, gid, 16);
188 } 190 }
189 191
190 members_count = be32_to_cpu(mgm->members_count); 192 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
191 if (members_count == MLX4_QP_PER_MGM) { 193 if (members_count == MLX4_QP_PER_MGM) {
192 mlx4_err(dev, "MGM at index %x is full.\n", index); 194 mlx4_err(dev, "MGM at index %x is full.\n", index);
193 err = -ENOMEM; 195 err = -ENOMEM;
@@ -207,7 +209,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
207 else 209 else
208 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 210 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
209 211
210 mgm->members_count = cpu_to_be32(members_count); 212 mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
211 213
212 err = mlx4_WRITE_MCG(dev, index, mailbox); 214 err = mlx4_WRITE_MCG(dev, index, mailbox);
213 if (err) 215 if (err)
@@ -242,7 +244,8 @@ out:
242} 244}
243EXPORT_SYMBOL_GPL(mlx4_multicast_attach); 245EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
244 246
245int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) 247int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
248 enum mlx4_protocol protocol)
246{ 249{
247 struct mlx4_priv *priv = mlx4_priv(dev); 250 struct mlx4_priv *priv = mlx4_priv(dev);
248 struct mlx4_cmd_mailbox *mailbox; 251 struct mlx4_cmd_mailbox *mailbox;
@@ -260,7 +263,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
260 263
261 mutex_lock(&priv->mcg_table.mutex); 264 mutex_lock(&priv->mcg_table.mutex);
262 265
263 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); 266 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
264 if (err) 267 if (err)
265 goto out; 268 goto out;
266 269
@@ -270,7 +273,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
270 goto out; 273 goto out;
271 } 274 }
272 275
273 members_count = be32_to_cpu(mgm->members_count); 276 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
274 for (loc = -1, i = 0; i < members_count; ++i) 277 for (loc = -1, i = 0; i < members_count; ++i)
275 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 278 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
276 loc = i; 279 loc = i;
@@ -282,7 +285,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
282 } 285 }
283 286
284 287
285 mgm->members_count = cpu_to_be32(--members_count); 288 mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
286 mgm->qp[loc] = mgm->qp[i - 1]; 289 mgm->qp[loc] = mgm->qp[i - 1];
287 mgm->qp[i - 1] = 0; 290 mgm->qp[i - 1] = 0;
288 291
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a37fcf11ab36..ea5cfe2c3a04 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3403,9 +3403,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
3403 return -EIO; 3403 return -EIO;
3404 } 3404 }
3405 3405
3406 status = pci_restore_state(pdev); 3406 pci_restore_state(pdev);
3407 if (status)
3408 return status;
3409 3407
3410 status = pci_enable_device(pdev); 3408 status = pci_enable_device(pdev);
3411 if (status) { 3409 if (status) {
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2541321bad82..9fb59d3f9c92 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
4489{ 4489{
4490 struct niu_parent *parent = np->parent; 4490 struct niu_parent *parent = np->parent;
4491 int first_rx_channel, first_tx_channel; 4491 int first_rx_channel, first_tx_channel;
4492 int num_rx_rings, num_tx_rings;
4493 struct rx_ring_info *rx_rings;
4494 struct tx_ring_info *tx_rings;
4492 int i, port, err; 4495 int i, port, err;
4493 4496
4494 port = np->port; 4497 port = np->port;
@@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np)
4498 first_tx_channel += parent->txchan_per_port[i]; 4501 first_tx_channel += parent->txchan_per_port[i];
4499 } 4502 }
4500 4503
4501 np->num_rx_rings = parent->rxchan_per_port[port]; 4504 num_rx_rings = parent->rxchan_per_port[port];
4502 np->num_tx_rings = parent->txchan_per_port[port]; 4505 num_tx_rings = parent->txchan_per_port[port];
4503 4506
4504 netif_set_real_num_rx_queues(np->dev, np->num_rx_rings); 4507 rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
4505 netif_set_real_num_tx_queues(np->dev, np->num_tx_rings); 4508 GFP_KERNEL);
4506
4507 np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
4508 GFP_KERNEL);
4509 err = -ENOMEM; 4509 err = -ENOMEM;
4510 if (!np->rx_rings) 4510 if (!rx_rings)
4511 goto out_err; 4511 goto out_err;
4512 4512
4513 np->num_rx_rings = num_rx_rings;
4514 smp_wmb();
4515 np->rx_rings = rx_rings;
4516
4517 netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4518
4513 for (i = 0; i < np->num_rx_rings; i++) { 4519 for (i = 0; i < np->num_rx_rings; i++) {
4514 struct rx_ring_info *rp = &np->rx_rings[i]; 4520 struct rx_ring_info *rp = &np->rx_rings[i];
4515 4521
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
4538 return err; 4544 return err;
4539 } 4545 }
4540 4546
4541 np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info), 4547 tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
4542 GFP_KERNEL); 4548 GFP_KERNEL);
4543 err = -ENOMEM; 4549 err = -ENOMEM;
4544 if (!np->tx_rings) 4550 if (!tx_rings)
4545 goto out_err; 4551 goto out_err;
4546 4552
4553 np->num_tx_rings = num_tx_rings;
4554 smp_wmb();
4555 np->tx_rings = tx_rings;
4556
4557 netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4558
4547 for (i = 0; i < np->num_tx_rings; i++) { 4559 for (i = 0; i < np->num_tx_rings; i++) {
4548 struct tx_ring_info *rp = &np->tx_rings[i]; 4560 struct tx_ring_info *rp = &np->tx_rings[i];
4549 4561
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
6246static void niu_get_rx_stats(struct niu *np) 6258static void niu_get_rx_stats(struct niu *np)
6247{ 6259{
6248 unsigned long pkts, dropped, errors, bytes; 6260 unsigned long pkts, dropped, errors, bytes;
6261 struct rx_ring_info *rx_rings;
6249 int i; 6262 int i;
6250 6263
6251 pkts = dropped = errors = bytes = 0; 6264 pkts = dropped = errors = bytes = 0;
6265
6266 rx_rings = ACCESS_ONCE(np->rx_rings);
6267 if (!rx_rings)
6268 goto no_rings;
6269
6252 for (i = 0; i < np->num_rx_rings; i++) { 6270 for (i = 0; i < np->num_rx_rings; i++) {
6253 struct rx_ring_info *rp = &np->rx_rings[i]; 6271 struct rx_ring_info *rp = &rx_rings[i];
6254 6272
6255 niu_sync_rx_discard_stats(np, rp, 0); 6273 niu_sync_rx_discard_stats(np, rp, 0);
6256 6274
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
6259 dropped += rp->rx_dropped; 6277 dropped += rp->rx_dropped;
6260 errors += rp->rx_errors; 6278 errors += rp->rx_errors;
6261 } 6279 }
6280
6281no_rings:
6262 np->dev->stats.rx_packets = pkts; 6282 np->dev->stats.rx_packets = pkts;
6263 np->dev->stats.rx_bytes = bytes; 6283 np->dev->stats.rx_bytes = bytes;
6264 np->dev->stats.rx_dropped = dropped; 6284 np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
6268static void niu_get_tx_stats(struct niu *np) 6288static void niu_get_tx_stats(struct niu *np)
6269{ 6289{
6270 unsigned long pkts, errors, bytes; 6290 unsigned long pkts, errors, bytes;
6291 struct tx_ring_info *tx_rings;
6271 int i; 6292 int i;
6272 6293
6273 pkts = errors = bytes = 0; 6294 pkts = errors = bytes = 0;
6295
6296 tx_rings = ACCESS_ONCE(np->tx_rings);
6297 if (!tx_rings)
6298 goto no_rings;
6299
6274 for (i = 0; i < np->num_tx_rings; i++) { 6300 for (i = 0; i < np->num_tx_rings; i++) {
6275 struct tx_ring_info *rp = &np->tx_rings[i]; 6301 struct tx_ring_info *rp = &tx_rings[i];
6276 6302
6277 pkts += rp->tx_packets; 6303 pkts += rp->tx_packets;
6278 bytes += rp->tx_bytes; 6304 bytes += rp->tx_bytes;
6279 errors += rp->tx_errors; 6305 errors += rp->tx_errors;
6280 } 6306 }
6307
6308no_rings:
6281 np->dev->stats.tx_packets = pkts; 6309 np->dev->stats.tx_packets = pkts;
6282 np->dev->stats.tx_bytes = bytes; 6310 np->dev->stats.tx_bytes = bytes;
6283 np->dev->stats.tx_errors = errors; 6311 np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
6287{ 6315{
6288 struct niu *np = netdev_priv(dev); 6316 struct niu *np = netdev_priv(dev);
6289 6317
6290 niu_get_rx_stats(np); 6318 if (netif_running(dev)) {
6291 niu_get_tx_stats(np); 6319 niu_get_rx_stats(np);
6292 6320 niu_get_tx_stats(np);
6321 }
6293 return &dev->stats; 6322 return &dev->stats;
6294} 6323}
6295 6324
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c766f3a..a41b2cf4d917 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
1988 } 1988 }
1989 1989
1990 ndev = alloc_etherdev(sizeof(struct ns83820)); 1990 ndev = alloc_etherdev(sizeof(struct ns83820));
1991 dev = PRIV(ndev);
1992
1993 err = -ENOMEM; 1991 err = -ENOMEM;
1994 if (!dev) 1992 if (!ndev)
1995 goto out; 1993 goto out;
1996 1994
1995 dev = PRIV(ndev);
1997 dev->ndev = ndev; 1996 dev->ndev = ndev;
1998 1997
1999 spin_lock_init(&dev->rx_info.lock); 1998 spin_lock_init(&dev->rx_info.lock);
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index d7355306a738..4c9a7d4f3fca 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -519,7 +519,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
519 struct pch_gbe_adapter *adapter; 519 struct pch_gbe_adapter *adapter;
520 adapter = container_of(work, struct pch_gbe_adapter, reset_task); 520 adapter = container_of(work, struct pch_gbe_adapter, reset_task);
521 521
522 rtnl_lock();
522 pch_gbe_reinit_locked(adapter); 523 pch_gbe_reinit_locked(adapter);
524 rtnl_unlock();
523} 525}
524 526
525/** 527/**
@@ -528,14 +530,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
528 */ 530 */
529void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) 531void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
530{ 532{
531 struct net_device *netdev = adapter->netdev; 533 pch_gbe_down(adapter);
532 534 pch_gbe_up(adapter);
533 rtnl_lock();
534 if (netif_running(netdev)) {
535 pch_gbe_down(adapter);
536 pch_gbe_up(adapter);
537 }
538 rtnl_unlock();
539} 535}
540 536
541/** 537/**
@@ -2247,7 +2243,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
2247 struct net_device *netdev = pci_get_drvdata(pdev); 2243 struct net_device *netdev = pci_get_drvdata(pdev);
2248 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2244 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2249 2245
2250 flush_scheduled_work(); 2246 cancel_work_sync(&adapter->reset_task);
2251 unregister_netdev(netdev); 2247 unregister_netdev(netdev);
2252 2248
2253 pch_gbe_hal_phy_hw_reset(&adapter->hw); 2249 pch_gbe_hal_phy_hw_reset(&adapter->hw);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1f42f6ac8551..d3cb77205863 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
1488 1488
1489 /* 1489 /*
1490 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. 1490 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
1491 * Early datasheets said to poll the reset bit, but now they say that 1491 * We wait at least 2ms.
1492 * it "is not a reliable indicator and subsequently should be ignored."
1493 * We wait at least 10ms.
1494 */ 1492 */
1495 1493
1496 mdelay(10); 1494 mdelay(2);
1497 1495
1498 /* 1496 /*
1499 * Reset RBCR[01] back to zero as per magic incantation. 1497 * Reset RBCR[01] back to zero as per magic incantation.
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bb8645ab247c..59ccf0c5c610 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -554,6 +554,8 @@ struct rtl8169_private {
554 struct mii_if_info mii; 554 struct mii_if_info mii;
555 struct rtl8169_counters counters; 555 struct rtl8169_counters counters;
556 u32 saved_wolopts; 556 u32 saved_wolopts;
557
558 const struct firmware *fw;
557}; 559};
558 560
559MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 561MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -971,7 +973,8 @@ static void __rtl8169_check_link_status(struct net_device *dev,
971 if (pm) 973 if (pm)
972 pm_request_resume(&tp->pci_dev->dev); 974 pm_request_resume(&tp->pci_dev->dev);
973 netif_carrier_on(dev); 975 netif_carrier_on(dev);
974 netif_info(tp, ifup, dev, "link up\n"); 976 if (net_ratelimit())
977 netif_info(tp, ifup, dev, "link up\n");
975 } else { 978 } else {
976 netif_carrier_off(dev); 979 netif_carrier_off(dev);
977 netif_info(tp, ifdown, dev, "link down\n"); 980 netif_info(tp, ifdown, dev, "link down\n");
@@ -1766,6 +1769,29 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1766 } 1769 }
1767} 1770}
1768 1771
1772static void rtl_release_firmware(struct rtl8169_private *tp)
1773{
1774 release_firmware(tp->fw);
1775 tp->fw = NULL;
1776}
1777
1778static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
1779{
1780 const struct firmware **fw = &tp->fw;
1781 int rc = !*fw;
1782
1783 if (rc) {
1784 rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
1785 if (rc < 0)
1786 goto out;
1787 }
1788
1789 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
1790 rtl_phy_write_fw(tp, *fw);
1791out:
1792 return rc;
1793}
1794
1769static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) 1795static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
1770{ 1796{
1771 static const struct phy_reg phy_reg_init[] = { 1797 static const struct phy_reg phy_reg_init[] = {
@@ -2139,7 +2165,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2139 { 0x0d, 0xf880 } 2165 { 0x0d, 0xf880 }
2140 }; 2166 };
2141 void __iomem *ioaddr = tp->mmio_addr; 2167 void __iomem *ioaddr = tp->mmio_addr;
2142 const struct firmware *fw;
2143 2168
2144 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2169 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2145 2170
@@ -2203,11 +2228,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2203 2228
2204 rtl_writephy(tp, 0x1f, 0x0005); 2229 rtl_writephy(tp, 0x1f, 0x0005);
2205 rtl_writephy(tp, 0x05, 0x001b); 2230 rtl_writephy(tp, 0x05, 0x001b);
2206 if (rtl_readphy(tp, 0x06) == 0xbf00 && 2231 if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
2207 request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) { 2232 (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
2208 rtl_phy_write_fw(tp, fw);
2209 release_firmware(fw);
2210 } else {
2211 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); 2233 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2212 } 2234 }
2213 2235
@@ -2257,7 +2279,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2257 { 0x0d, 0xf880 } 2279 { 0x0d, 0xf880 }
2258 }; 2280 };
2259 void __iomem *ioaddr = tp->mmio_addr; 2281 void __iomem *ioaddr = tp->mmio_addr;
2260 const struct firmware *fw;
2261 2282
2262 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2283 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2263 2284
@@ -2312,11 +2333,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2312 2333
2313 rtl_writephy(tp, 0x1f, 0x0005); 2334 rtl_writephy(tp, 0x1f, 0x0005);
2314 rtl_writephy(tp, 0x05, 0x001b); 2335 rtl_writephy(tp, 0x05, 0x001b);
2315 if (rtl_readphy(tp, 0x06) == 0xb300 && 2336 if ((rtl_readphy(tp, 0x06) != 0xb300) ||
2316 request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) { 2337 (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
2317 rtl_phy_write_fw(tp, fw);
2318 release_firmware(fw);
2319 } else {
2320 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); 2338 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2321 } 2339 }
2322 2340
@@ -3200,6 +3218,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3200 3218
3201 cancel_delayed_work_sync(&tp->task); 3219 cancel_delayed_work_sync(&tp->task);
3202 3220
3221 rtl_release_firmware(tp);
3222
3203 unregister_netdev(dev); 3223 unregister_netdev(dev);
3204 3224
3205 if (pci_dev_run_wake(pdev)) 3225 if (pci_dev_run_wake(pdev))
@@ -3738,7 +3758,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
3738 RTL_W16(IntrMitigate, 0x5151); 3758 RTL_W16(IntrMitigate, 0x5151);
3739 3759
3740 /* Work around for RxFIFO overflow. */ 3760 /* Work around for RxFIFO overflow. */
3741 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 3761 if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
3762 tp->mac_version == RTL_GIGA_MAC_VER_22) {
3742 tp->intr_event |= RxFIFOOver | PCSTimeout; 3763 tp->intr_event |= RxFIFOOver | PCSTimeout;
3743 tp->intr_event &= ~RxOverflow; 3764 tp->intr_event &= ~RxOverflow;
3744 } 3765 }
@@ -4620,12 +4641,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4620 break; 4641 break;
4621 } 4642 }
4622 4643
4623 /* Work around for rx fifo overflow */ 4644 if (unlikely(status & RxFIFOOver)) {
4624 if (unlikely(status & RxFIFOOver) && 4645 switch (tp->mac_version) {
4625 (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 4646 /* Work around for rx fifo overflow */
4626 netif_stop_queue(dev); 4647 case RTL_GIGA_MAC_VER_11:
4627 rtl8169_tx_timeout(dev); 4648 case RTL_GIGA_MAC_VER_22:
4628 break; 4649 case RTL_GIGA_MAC_VER_26:
4650 netif_stop_queue(dev);
4651 rtl8169_tx_timeout(dev);
4652 goto done;
4653 /* Testers needed. */
4654 case RTL_GIGA_MAC_VER_17:
4655 case RTL_GIGA_MAC_VER_19:
4656 case RTL_GIGA_MAC_VER_20:
4657 case RTL_GIGA_MAC_VER_21:
4658 case RTL_GIGA_MAC_VER_23:
4659 case RTL_GIGA_MAC_VER_24:
4660 case RTL_GIGA_MAC_VER_27:
4661 case RTL_GIGA_MAC_VER_28:
4662 /* Experimental science. Pktgen proof. */
4663 case RTL_GIGA_MAC_VER_12:
4664 case RTL_GIGA_MAC_VER_25:
4665 if (status == RxFIFOOver)
4666 goto done;
4667 break;
4668 default:
4669 break;
4670 }
4629 } 4671 }
4630 4672
4631 if (unlikely(status & SYSErr)) { 4673 if (unlikely(status & SYSErr)) {
@@ -4661,7 +4703,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4661 (status & RxFIFOOver) ? (status | RxOverflow) : status); 4703 (status & RxFIFOOver) ? (status | RxOverflow) : status);
4662 status = RTL_R16(IntrStatus); 4704 status = RTL_R16(IntrStatus);
4663 } 4705 }
4664 4706done:
4665 return IRQ_RETVAL(handled); 4707 return IRQ_RETVAL(handled);
4666} 4708}
4667 4709
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 711449c6e675..002bac743843 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1153,6 +1153,9 @@ static int efx_wanted_channels(void)
1153 int count; 1153 int count;
1154 int cpu; 1154 int cpu;
1155 1155
1156 if (rss_cpus)
1157 return rss_cpus;
1158
1156 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { 1159 if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
1157 printk(KERN_WARNING 1160 printk(KERN_WARNING
1158 "sfc: RSS disabled due to allocation failure\n"); 1161 "sfc: RSS disabled due to allocation failure\n");
@@ -1266,27 +1269,18 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1266 efx->legacy_irq = 0; 1269 efx->legacy_irq = 0;
1267} 1270}
1268 1271
1269struct efx_tx_queue *
1270efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
1271{
1272 unsigned tx_channel_offset =
1273 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1274 EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
1275 type >= EFX_TXQ_TYPES);
1276 return &efx->channel[tx_channel_offset + index]->tx_queue[type];
1277}
1278
1279static void efx_set_channels(struct efx_nic *efx) 1272static void efx_set_channels(struct efx_nic *efx)
1280{ 1273{
1281 struct efx_channel *channel; 1274 struct efx_channel *channel;
1282 struct efx_tx_queue *tx_queue; 1275 struct efx_tx_queue *tx_queue;
1283 unsigned tx_channel_offset = 1276
1277 efx->tx_channel_offset =
1284 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1278 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1285 1279
1286 /* Channel pointers were set in efx_init_struct() but we now 1280 /* Channel pointers were set in efx_init_struct() but we now
1287 * need to clear them for TX queues in any RX-only channels. */ 1281 * need to clear them for TX queues in any RX-only channels. */
1288 efx_for_each_channel(channel, efx) { 1282 efx_for_each_channel(channel, efx) {
1289 if (channel->channel - tx_channel_offset >= 1283 if (channel->channel - efx->tx_channel_offset >=
1290 efx->n_tx_channels) { 1284 efx->n_tx_channels) {
1291 efx_for_each_channel_tx_queue(tx_queue, channel) 1285 efx_for_each_channel_tx_queue(tx_queue, channel)
1292 tx_queue->channel = NULL; 1286 tx_queue->channel = NULL;
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 70e4f7dcce81..61ddd2c6e750 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1107,22 +1107,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1107 1107
1108 /* Restore PCI configuration if needed */ 1108 /* Restore PCI configuration if needed */
1109 if (method == RESET_TYPE_WORLD) { 1109 if (method == RESET_TYPE_WORLD) {
1110 if (efx_nic_is_dual_func(efx)) { 1110 if (efx_nic_is_dual_func(efx))
1111 rc = pci_restore_state(nic_data->pci_dev2); 1111 pci_restore_state(nic_data->pci_dev2);
1112 if (rc) { 1112 pci_restore_state(efx->pci_dev);
1113 netif_err(efx, drv, efx->net_dev,
1114 "failed to restore PCI config for "
1115 "the secondary function\n");
1116 goto fail3;
1117 }
1118 }
1119 rc = pci_restore_state(efx->pci_dev);
1120 if (rc) {
1121 netif_err(efx, drv, efx->net_dev,
1122 "failed to restore PCI config for the "
1123 "primary function\n");
1124 goto fail4;
1125 }
1126 netif_dbg(efx, drv, efx->net_dev, 1113 netif_dbg(efx, drv, efx->net_dev,
1127 "successfully restored PCI config\n"); 1114 "successfully restored PCI config\n");
1128 } 1115 }
@@ -1133,7 +1120,7 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1133 rc = -ETIMEDOUT; 1120 rc = -ETIMEDOUT;
1134 netif_err(efx, hw, efx->net_dev, 1121 netif_err(efx, hw, efx->net_dev,
1135 "timed out waiting for hardware reset\n"); 1122 "timed out waiting for hardware reset\n");
1136 goto fail5; 1123 goto fail3;
1137 } 1124 }
1138 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); 1125 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
1139 1126
@@ -1141,11 +1128,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1141 1128
1142 /* pci_save_state() and pci_restore_state() MUST be called in pairs */ 1129 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
1143fail2: 1130fail2:
1144fail3:
1145 pci_restore_state(efx->pci_dev); 1131 pci_restore_state(efx->pci_dev);
1146fail1: 1132fail1:
1147fail4: 1133fail3:
1148fail5:
1149 return rc; 1134 return rc;
1150} 1135}
1151 1136
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index bdce66ddf93a..28df8665256a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -735,6 +735,7 @@ struct efx_nic {
735 unsigned next_buffer_table; 735 unsigned next_buffer_table;
736 unsigned n_channels; 736 unsigned n_channels;
737 unsigned n_rx_channels; 737 unsigned n_rx_channels;
738 unsigned tx_channel_offset;
738 unsigned n_tx_channels; 739 unsigned n_tx_channels;
739 unsigned int rx_buffer_len; 740 unsigned int rx_buffer_len;
740 unsigned int rx_buffer_order; 741 unsigned int rx_buffer_order;
@@ -929,8 +930,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
929 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ 930 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
930 (_efx)->channel[_channel->channel + 1] : NULL) 931 (_efx)->channel[_channel->channel + 1] : NULL)
931 932
932extern struct efx_tx_queue * 933static inline struct efx_tx_queue *
933efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type); 934efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
935{
936 EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
937 type >= EFX_TXQ_TYPES);
938 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
939}
934 940
935static inline struct efx_tx_queue * 941static inline struct efx_tx_queue *
936efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) 942efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5976d1d51df1..640e368ebeee 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n", 1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n",
1778 net_dev->name, sis_priv->cur_rx, 1778 net_dev->name, sis_priv->cur_rx,
1779 sis_priv->dirty_rx); 1779 sis_priv->dirty_rx);
1780 dev_kfree_skb(skb);
1780 break; 1781 break;
1781 } 1782 }
1782 1783
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7841a8f69998..93b32d366611 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -60,12 +60,6 @@
60#define BAR_0 0 60#define BAR_0 0
61#define BAR_2 2 61#define BAR_2 2
62 62
63#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
64#define TG3_VLAN_TAG_USED 1
65#else
66#define TG3_VLAN_TAG_USED 0
67#endif
68
69#include "tg3.h" 63#include "tg3.h"
70 64
71#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
@@ -134,9 +128,6 @@
134 TG3_TX_RING_SIZE) 128 TG3_TX_RING_SIZE)
135#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 129#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
136 130
137#define TG3_RX_DMA_ALIGN 16
138#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
139
140#define TG3_DMA_BYTE_ENAB 64 131#define TG3_DMA_BYTE_ENAB 64
141 132
142#define TG3_RX_STD_DMA_SZ 1536 133#define TG3_RX_STD_DMA_SZ 1536
@@ -4722,8 +4713,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4722 struct sk_buff *skb; 4713 struct sk_buff *skb;
4723 dma_addr_t dma_addr; 4714 dma_addr_t dma_addr;
4724 u32 opaque_key, desc_idx, *post_ptr; 4715 u32 opaque_key, desc_idx, *post_ptr;
4725 bool hw_vlan __maybe_unused = false;
4726 u16 vtag __maybe_unused = 0;
4727 4716
4728 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4717 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4729 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4718 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4771,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4782 tg3_recycle_rx(tnapi, tpr, opaque_key, 4771 tg3_recycle_rx(tnapi, tpr, opaque_key,
4783 desc_idx, *post_ptr); 4772 desc_idx, *post_ptr);
4784 4773
4785 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + 4774 copy_skb = netdev_alloc_skb(tp->dev, len +
4786 TG3_RAW_IP_ALIGN); 4775 TG3_RAW_IP_ALIGN);
4787 if (copy_skb == NULL) 4776 if (copy_skb == NULL)
4788 goto drop_it_no_recycle; 4777 goto drop_it_no_recycle;
4789 4778
4790 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); 4779 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4791 skb_put(copy_skb, len); 4780 skb_put(copy_skb, len);
4792 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4781 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4793 skb_copy_from_linear_data(skb, copy_skb->data, len); 4782 skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4803,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4814 } 4803 }
4815 4804
4816 if (desc->type_flags & RXD_FLAG_VLAN && 4805 if (desc->type_flags & RXD_FLAG_VLAN &&
4817 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { 4806 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4818 vtag = desc->err_vlan & RXD_VLAN_MASK; 4807 __vlan_hwaccel_put_tag(skb,
4819#if TG3_VLAN_TAG_USED 4808 desc->err_vlan & RXD_VLAN_MASK);
4820 if (tp->vlgrp)
4821 hw_vlan = true;
4822 else
4823#endif
4824 {
4825 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4826 __skb_push(skb, VLAN_HLEN);
4827
4828 memmove(ve, skb->data + VLAN_HLEN,
4829 ETH_ALEN * 2);
4830 ve->h_vlan_proto = htons(ETH_P_8021Q);
4831 ve->h_vlan_TCI = htons(vtag);
4832 }
4833 }
4834 4809
4835#if TG3_VLAN_TAG_USED 4810 napi_gro_receive(&tnapi->napi, skb);
4836 if (hw_vlan)
4837 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4838 else
4839#endif
4840 napi_gro_receive(&tnapi->napi, skb);
4841 4811
4842 received++; 4812 received++;
4843 budget--; 4813 budget--;
@@ -5740,11 +5710,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5740 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5710 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5741 } 5711 }
5742 5712
5743#if TG3_VLAN_TAG_USED
5744 if (vlan_tx_tag_present(skb)) 5713 if (vlan_tx_tag_present(skb))
5745 base_flags |= (TXD_FLAG_VLAN | 5714 base_flags |= (TXD_FLAG_VLAN |
5746 (vlan_tx_tag_get(skb) << 16)); 5715 (vlan_tx_tag_get(skb) << 16));
5747#endif
5748 5716
5749 len = skb_headlen(skb); 5717 len = skb_headlen(skb);
5750 5718
@@ -5986,11 +5954,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5986 } 5954 }
5987 } 5955 }
5988 } 5956 }
5989#if TG3_VLAN_TAG_USED 5957
5990 if (vlan_tx_tag_present(skb)) 5958 if (vlan_tx_tag_present(skb))
5991 base_flags |= (TXD_FLAG_VLAN | 5959 base_flags |= (TXD_FLAG_VLAN |
5992 (vlan_tx_tag_get(skb) << 16)); 5960 (vlan_tx_tag_get(skb) << 16));
5993#endif
5994 5961
5995 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5962 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5996 !mss && skb->len > VLAN_ETH_FRAME_LEN) 5963 !mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -9532,17 +9499,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
9532 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | 9499 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9533 RX_MODE_KEEP_VLAN_TAG); 9500 RX_MODE_KEEP_VLAN_TAG);
9534 9501
9502#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9535 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9503 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9536 * flag clear. 9504 * flag clear.
9537 */ 9505 */
9538#if TG3_VLAN_TAG_USED
9539 if (!tp->vlgrp &&
9540 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9541 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9542#else
9543 /* By definition, VLAN is disabled always in this
9544 * case.
9545 */
9546 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9506 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9547 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9507 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9548#endif 9508#endif
@@ -11230,31 +11190,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11230 return -EOPNOTSUPP; 11190 return -EOPNOTSUPP;
11231} 11191}
11232 11192
11233#if TG3_VLAN_TAG_USED
11234static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11235{
11236 struct tg3 *tp = netdev_priv(dev);
11237
11238 if (!netif_running(dev)) {
11239 tp->vlgrp = grp;
11240 return;
11241 }
11242
11243 tg3_netif_stop(tp);
11244
11245 tg3_full_lock(tp, 0);
11246
11247 tp->vlgrp = grp;
11248
11249 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11250 __tg3_set_rx_mode(dev);
11251
11252 tg3_netif_start(tp);
11253
11254 tg3_full_unlock(tp);
11255}
11256#endif
11257
11258static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 11193static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11259{ 11194{
11260 struct tg3 *tp = netdev_priv(dev); 11195 struct tg3 *tp = netdev_priv(dev);
@@ -13066,9 +13001,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13066 13001
13067static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 13002static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
13068{ 13003{
13069#if TG3_VLAN_TAG_USED
13070 dev->vlan_features |= flags; 13004 dev->vlan_features |= flags;
13071#endif
13072} 13005}
13073 13006
13074static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 13007static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13861,11 +13794,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13861 else 13794 else
13862 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13795 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13863 13796
13864 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; 13797 tp->rx_offset = NET_IP_ALIGN;
13865 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 13798 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13799 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13867 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 13800 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13868 tp->rx_offset -= NET_IP_ALIGN; 13801 tp->rx_offset = 0;
13869#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 13802#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13870 tp->rx_copy_thresh = ~(u16)0; 13803 tp->rx_copy_thresh = ~(u16)0;
13871#endif 13804#endif
@@ -14629,9 +14562,6 @@ static const struct net_device_ops tg3_netdev_ops = {
14629 .ndo_do_ioctl = tg3_ioctl, 14562 .ndo_do_ioctl = tg3_ioctl,
14630 .ndo_tx_timeout = tg3_tx_timeout, 14563 .ndo_tx_timeout = tg3_tx_timeout,
14631 .ndo_change_mtu = tg3_change_mtu, 14564 .ndo_change_mtu = tg3_change_mtu,
14632#if TG3_VLAN_TAG_USED
14633 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14634#endif
14635#ifdef CONFIG_NET_POLL_CONTROLLER 14565#ifdef CONFIG_NET_POLL_CONTROLLER
14636 .ndo_poll_controller = tg3_poll_controller, 14566 .ndo_poll_controller = tg3_poll_controller,
14637#endif 14567#endif
@@ -14648,9 +14578,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14648 .ndo_do_ioctl = tg3_ioctl, 14578 .ndo_do_ioctl = tg3_ioctl,
14649 .ndo_tx_timeout = tg3_tx_timeout, 14579 .ndo_tx_timeout = tg3_tx_timeout,
14650 .ndo_change_mtu = tg3_change_mtu, 14580 .ndo_change_mtu = tg3_change_mtu,
14651#if TG3_VLAN_TAG_USED
14652 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14653#endif
14654#ifdef CONFIG_NET_POLL_CONTROLLER 14581#ifdef CONFIG_NET_POLL_CONTROLLER
14655 .ndo_poll_controller = tg3_poll_controller, 14582 .ndo_poll_controller = tg3_poll_controller,
14656#endif 14583#endif
@@ -14700,9 +14627,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14700 14627
14701 SET_NETDEV_DEV(dev, &pdev->dev); 14628 SET_NETDEV_DEV(dev, &pdev->dev);
14702 14629
14703#if TG3_VLAN_TAG_USED
14704 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 14630 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14705#endif
14706 14631
14707 tp = netdev_priv(dev); 14632 tp = netdev_priv(dev);
14708 tp->pdev = pdev; 14633 tp->pdev = pdev;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d62c8d937c82..f528243e1a4f 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2808,9 +2808,6 @@ struct tg3 {
2808 u32 rx_std_max_post; 2808 u32 rx_std_max_post;
2809 u32 rx_offset; 2809 u32 rx_offset;
2810 u32 rx_pkt_map_sz; 2810 u32 rx_pkt_map_sz;
2811#if TG3_VLAN_TAG_USED
2812 struct vlan_group *vlgrp;
2813#endif
2814 2811
2815 2812
2816 /* begin "everything else" cacheline(s) section */ 2813 /* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0e6bac5ec65b..7cb301da7474 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -142,14 +142,6 @@
142MODULE_AUTHOR("Tilera"); 142MODULE_AUTHOR("Tilera");
143MODULE_LICENSE("GPL"); 143MODULE_LICENSE("GPL");
144 144
145
146#define IS_MULTICAST(mac_addr) \
147 (((u8 *)(mac_addr))[0] & 0x01)
148
149#define IS_BROADCAST(mac_addr) \
150 (((u16 *)(mac_addr))[0] == 0xffff)
151
152
153/* 145/*
154 * Queue of incoming packets for a specific cpu and device. 146 * Queue of incoming packets for a specific cpu and device.
155 * 147 *
@@ -795,7 +787,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
795 /* 787 /*
796 * FIXME: Implement HW multicast filter. 788 * FIXME: Implement HW multicast filter.
797 */ 789 */
798 if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { 790 if (is_unicast_ether_addr(buf)) {
799 /* Filter packets not for our address. */ 791 /* Filter packets not for our address. */
800 const u8 *mine = dev->dev_addr; 792 const u8 *mine = dev->dev_addr;
801 filter = compare_ether_addr(mine, buf); 793 filter = compare_ether_addr(mine, buf);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 73a3e0d93237..715e7b47e7e9 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2032,7 +2032,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
2032 netdev_for_each_mc_addr(ha, dev) { 2032 netdev_for_each_mc_addr(ha, dev) {
2033 /* Only support group multicast for now. 2033 /* Only support group multicast for now.
2034 */ 2034 */
2035 if (!(ha->addr[0] & 1)) 2035 if (!is_multicast_ether_addr(ha->addr))
2036 continue; 2036 continue;
2037 2037
2038 /* Ask CPM to run CRC and set bit in 2038 /* Ask CPM to run CRC and set bit in
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 593c104ab199..7113168473cf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cdc_ncm.c 2 * cdc_ncm.c
3 * 3 *
4 * Copyright (C) ST-Ericsson 2010 4 * Copyright (C) ST-Ericsson 2010-2011
5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> 5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> 6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 * 7 *
@@ -54,7 +54,7 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "30-Nov-2010" 57#define DRIVER_VERSION "7-Feb-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -77,6 +77,9 @@
77 */ 77 */
78#define CDC_NCM_DPT_DATAGRAMS_MAX 32 78#define CDC_NCM_DPT_DATAGRAMS_MAX 32
79 79
80/* Maximum amount of IN datagrams in NTB */
81#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */
82
80/* Restart the timer, if amount of datagrams is less than given value */ 83/* Restart the timer, if amount of datagrams is less than given value */
81#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 84#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
82 85
@@ -85,11 +88,6 @@
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ 88 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) 89 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87 90
88struct connection_speed_change {
89 __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
90 __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
91} __attribute__ ((packed));
92
93struct cdc_ncm_data { 91struct cdc_ncm_data {
94 struct usb_cdc_ncm_nth16 nth16; 92 struct usb_cdc_ncm_nth16 nth16;
95 struct usb_cdc_ncm_ndp16 ndp16; 93 struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
198{ 196{
199 struct usb_cdc_notification req; 197 struct usb_cdc_notification req;
200 u32 val; 198 u32 val;
201 __le16 max_datagram_size;
202 u8 flags; 199 u8 flags;
203 u8 iface_no; 200 u8 iface_no;
204 int err; 201 int err;
202 u16 ntb_fmt_supported;
205 203
206 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 204 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
207 205
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
223 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); 221 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
224 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); 222 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
225 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); 223 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
224 /* devices prior to NCM Errata shall set this field to zero */
225 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
226 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
226 227
227 if (ctx->func_desc != NULL) 228 if (ctx->func_desc != NULL)
228 flags = ctx->func_desc->bmNetworkCapabilities; 229 flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
231 232
232 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " 233 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
233 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " 234 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
234 "wNdpOutAlignment=%u flags=0x%x\n", 235 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
235 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, 236 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
236 ctx->tx_ndp_modulus, flags); 237 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
237 238
238 /* max count of tx datagrams without terminating NULL entry */ 239 /* max count of tx datagrams */
239 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; 240 if ((ctx->tx_max_datagrams == 0) ||
241 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
242 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
240 243
241 /* verify maximum size of received NTB in bytes */ 244 /* verify maximum size of received NTB in bytes */
242 if ((ctx->rx_max < 245 if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
243 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 246 pr_debug("Using min receive length=%d\n",
244 (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { 247 USB_CDC_NCM_NTB_MIN_IN_SIZE);
248 ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
249 }
250
251 if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
245 pr_debug("Using default maximum receive length=%d\n", 252 pr_debug("Using default maximum receive length=%d\n",
246 CDC_NCM_NTB_MAX_SIZE_RX); 253 CDC_NCM_NTB_MAX_SIZE_RX);
247 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; 254 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
248 } 255 }
249 256
257 /* inform device about NTB input size changes */
258 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
259 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
260 USB_RECIP_INTERFACE;
261 req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
262 req.wValue = 0;
263 req.wIndex = cpu_to_le16(iface_no);
264
265 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
266 struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
267
268 req.wLength = 8;
269 ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
270 ndp_in_sz.wNtbInMaxDatagrams =
271 cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
272 ndp_in_sz.wReserved = 0;
273 err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
274 1000);
275 } else {
276 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
277
278 req.wLength = 4;
279 err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
280 NULL, 1000);
281 }
282
283 if (err)
284 pr_debug("Setting NTB Input Size failed\n");
285 }
286
250 /* verify maximum size of transmitted NTB in bytes */ 287 /* verify maximum size of transmitted NTB in bytes */
251 if ((ctx->tx_max < 288 if ((ctx->tx_max <
252 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 289 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
297 /* additional configuration */ 334 /* additional configuration */
298 335
299 /* set CRC Mode */ 336 /* set CRC Mode */
300 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 337 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
301 req.bNotificationType = USB_CDC_SET_CRC_MODE; 338 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
302 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); 339 USB_RECIP_INTERFACE;
303 req.wIndex = cpu_to_le16(iface_no); 340 req.bNotificationType = USB_CDC_SET_CRC_MODE;
304 req.wLength = 0; 341 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
305 342 req.wIndex = cpu_to_le16(iface_no);
306 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 343 req.wLength = 0;
307 if (err) 344
308 pr_debug("Setting CRC mode off failed\n"); 345 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
346 if (err)
347 pr_debug("Setting CRC mode off failed\n");
348 }
309 349
310 /* set NTB format */ 350 /* set NTB format, if both formats are supported */
311 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 351 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
312 req.bNotificationType = USB_CDC_SET_NTB_FORMAT; 352 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
313 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); 353 USB_RECIP_INTERFACE;
314 req.wIndex = cpu_to_le16(iface_no); 354 req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
315 req.wLength = 0; 355 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
356 req.wIndex = cpu_to_le16(iface_no);
357 req.wLength = 0;
358
359 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
360 if (err)
361 pr_debug("Setting NTB format to 16-bit failed\n");
362 }
316 363
317 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 364 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
318 if (err)
319 pr_debug("Setting NTB format to 16-bit failed\n");
320 365
321 /* set Max Datagram Size (MTU) */ 366 /* set Max Datagram Size (MTU) */
322 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; 367 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
323 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; 368 __le16 max_datagram_size;
324 req.wValue = 0; 369 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
325 req.wIndex = cpu_to_le16(iface_no); 370
326 req.wLength = cpu_to_le16(2); 371 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
372 USB_RECIP_INTERFACE;
373 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
374 req.wValue = 0;
375 req.wIndex = cpu_to_le16(iface_no);
376 req.wLength = cpu_to_le16(2);
377
378 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
379 1000);
380 if (err) {
381 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
382 CDC_NCM_MIN_DATAGRAM_SIZE);
383 } else {
384 ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
385 /* Check Eth descriptor value */
386 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
387 if (ctx->max_datagram_size > eth_max_sz)
388 ctx->max_datagram_size = eth_max_sz;
389 } else {
390 if (ctx->max_datagram_size >
391 CDC_NCM_MAX_DATAGRAM_SIZE)
392 ctx->max_datagram_size =
393 CDC_NCM_MAX_DATAGRAM_SIZE;
394 }
327 395
328 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); 396 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
329 if (err) { 397 ctx->max_datagram_size =
330 pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", 398 CDC_NCM_MIN_DATAGRAM_SIZE;
331 CDC_NCM_MIN_DATAGRAM_SIZE); 399
332 /* use default */ 400 /* if value changed, update device */
333 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; 401 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
334 } else { 402 USB_RECIP_INTERFACE;
335 ctx->max_datagram_size = le16_to_cpu(max_datagram_size); 403 req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
404 req.wValue = 0;
405 req.wIndex = cpu_to_le16(iface_no);
406 req.wLength = 2;
407 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
408
409 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
410 0, NULL, 1000);
411 if (err)
412 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
413 }
336 414
337 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
338 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
339 else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
340 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
341 } 415 }
342 416
343 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) 417 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
466 540
467 ctx->ether_desc = 541 ctx->ether_desc =
468 (const struct usb_cdc_ether_desc *)buf; 542 (const struct usb_cdc_ether_desc *)buf;
469
470 dev->hard_mtu = 543 dev->hard_mtu =
471 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); 544 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
472 545
473 if (dev->hard_mtu < 546 if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
474 (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) 547 dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
475 dev->hard_mtu = 548 else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
476 CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; 549 dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
477
478 else if (dev->hard_mtu >
479 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
480 dev->hard_mtu =
481 CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
482 break; 550 break;
483 551
484 case USB_CDC_NCM_TYPE: 552 case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
628 u32 offset; 696 u32 offset;
629 u32 last_offset; 697 u32 last_offset;
630 u16 n = 0; 698 u16 n = 0;
631 u8 timeout = 0; 699 u8 ready2send = 0;
632 700
633 /* if there is a remaining skb, it gets priority */ 701 /* if there is a remaining skb, it gets priority */
634 if (skb != NULL) 702 if (skb != NULL)
635 swap(skb, ctx->tx_rem_skb); 703 swap(skb, ctx->tx_rem_skb);
636 else 704 else
637 timeout = 1; 705 ready2send = 1;
638 706
639 /* 707 /*
640 * +----------------+ 708 * +----------------+
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
682 750
683 for (; n < ctx->tx_max_datagrams; n++) { 751 for (; n < ctx->tx_max_datagrams; n++) {
684 /* check if end of transmit buffer is reached */ 752 /* check if end of transmit buffer is reached */
685 if (offset >= ctx->tx_max) 753 if (offset >= ctx->tx_max) {
754 ready2send = 1;
686 break; 755 break;
687 756 }
688 /* compute maximum buffer size */ 757 /* compute maximum buffer size */
689 rem = ctx->tx_max - offset; 758 rem = ctx->tx_max - offset;
690 759
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
711 } 780 }
712 ctx->tx_rem_skb = skb; 781 ctx->tx_rem_skb = skb;
713 skb = NULL; 782 skb = NULL;
714 783 ready2send = 1;
715 /* loop one more time */
716 timeout = 1;
717 } 784 }
718 break; 785 break;
719 } 786 }
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
756 ctx->tx_curr_last_offset = last_offset; 823 ctx->tx_curr_last_offset = last_offset;
757 goto exit_no_skb; 824 goto exit_no_skb;
758 825
759 } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { 826 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
760 /* wait for more frames */ 827 /* wait for more frames */
761 /* push variables */ 828 /* push variables */
762 ctx->tx_curr_skb = skb_out; 829 ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
813 cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); 880 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
814 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); 881 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
815 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); 882 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
816 ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), 883 ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
817 ctx->tx_ndp_modulus); 884 ctx->tx_ndp_modulus);
818 885
819 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); 886 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
825 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * 892 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
826 sizeof(struct usb_cdc_ncm_dpe16)); 893 sizeof(struct usb_cdc_ncm_dpe16));
827 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); 894 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
828 ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ 895 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
829 896
830 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, 897 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
831 &(ctx->tx_ncm.ndp16), 898 &(ctx->tx_ncm.ndp16),
832 sizeof(ctx->tx_ncm.ndp16)); 899 sizeof(ctx->tx_ncm.ndp16));
833 900
834 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + 901 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
835 sizeof(ctx->tx_ncm.ndp16), 902 sizeof(ctx->tx_ncm.ndp16),
836 &(ctx->tx_ncm.dpe16), 903 &(ctx->tx_ncm.dpe16),
837 (ctx->tx_curr_frame_num + 1) * 904 (ctx->tx_curr_frame_num + 1) *
@@ -868,15 +935,19 @@ static void cdc_ncm_tx_timeout(unsigned long arg)
868 if (ctx->tx_timer_pending != 0) { 935 if (ctx->tx_timer_pending != 0) {
869 ctx->tx_timer_pending--; 936 ctx->tx_timer_pending--;
870 restart = 1; 937 restart = 1;
871 } else 938 } else {
872 restart = 0; 939 restart = 0;
940 }
873 941
874 spin_unlock(&ctx->mtx); 942 spin_unlock(&ctx->mtx);
875 943
876 if (restart) 944 if (restart) {
945 spin_lock(&ctx->mtx);
877 cdc_ncm_tx_timeout_start(ctx); 946 cdc_ncm_tx_timeout_start(ctx);
878 else if (ctx->netdev != NULL) 947 spin_unlock(&ctx->mtx);
948 } else if (ctx->netdev != NULL) {
879 usbnet_start_xmit(NULL, ctx->netdev); 949 usbnet_start_xmit(NULL, ctx->netdev);
950 }
880} 951}
881 952
882static struct sk_buff * 953static struct sk_buff *
@@ -900,7 +971,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
900 skb_out = cdc_ncm_fill_tx_frame(ctx, skb); 971 skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
901 if (ctx->tx_curr_skb != NULL) 972 if (ctx->tx_curr_skb != NULL)
902 need_timer = 1; 973 need_timer = 1;
903 spin_unlock(&ctx->mtx);
904 974
905 /* Start timer, if there is a remaining skb */ 975 /* Start timer, if there is a remaining skb */
906 if (need_timer) 976 if (need_timer)
@@ -908,6 +978,8 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
908 978
909 if (skb_out) 979 if (skb_out)
910 dev->net->stats.tx_packets += ctx->tx_curr_frame_num; 980 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
981
982 spin_unlock(&ctx->mtx);
911 return skb_out; 983 return skb_out;
912 984
913error: 985error:
@@ -956,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
956 goto error; 1028 goto error;
957 } 1029 }
958 1030
959 temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); 1031 temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
960 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { 1032 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
961 pr_debug("invalid DPT16 index\n"); 1033 pr_debug("invalid DPT16 index\n");
962 goto error; 1034 goto error;
@@ -1020,14 +1092,16 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
1020 if (((offset + temp) > actlen) || 1092 if (((offset + temp) > actlen) ||
1021 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { 1093 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
1022 pr_debug("invalid frame detected (ignored)" 1094 pr_debug("invalid frame detected (ignored)"
1023 "offset[%u]=%u, length=%u, skb=%p\n", 1095 "offset[%u]=%u, length=%u, skb=%p\n",
1024 x, offset, temp, skb); 1096 x, offset, temp, skb_in);
1025 if (!x) 1097 if (!x)
1026 goto error; 1098 goto error;
1027 break; 1099 break;
1028 1100
1029 } else { 1101 } else {
1030 skb = skb_clone(skb_in, GFP_ATOMIC); 1102 skb = skb_clone(skb_in, GFP_ATOMIC);
1103 if (!skb)
1104 goto error;
1031 skb->len = temp; 1105 skb->len = temp;
1032 skb->data = ((u8 *)skb_in->data) + offset; 1106 skb->data = ((u8 *)skb_in->data) + offset;
1033 skb_set_tail_pointer(skb, temp); 1107 skb_set_tail_pointer(skb, temp);
@@ -1041,10 +1115,10 @@ error:
1041 1115
1042static void 1116static void
1043cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, 1117cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
1044 struct connection_speed_change *data) 1118 struct usb_cdc_speed_change *data)
1045{ 1119{
1046 uint32_t rx_speed = le32_to_cpu(data->USBitRate); 1120 uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
1047 uint32_t tx_speed = le32_to_cpu(data->DSBitRate); 1121 uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
1048 1122
1049 /* 1123 /*
1050 * Currently the USB-NET API does not support reporting the actual 1124 * Currently the USB-NET API does not support reporting the actual
@@ -1085,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1085 /* test for split data in 8-byte chunks */ 1159 /* test for split data in 8-byte chunks */
1086 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { 1160 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
1087 cdc_ncm_speed_change(ctx, 1161 cdc_ncm_speed_change(ctx,
1088 (struct connection_speed_change *)urb->transfer_buffer); 1162 (struct usb_cdc_speed_change *)urb->transfer_buffer);
1089 return; 1163 return;
1090 } 1164 }
1091 1165
@@ -1113,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1113 break; 1187 break;
1114 1188
1115 case USB_CDC_NOTIFY_SPEED_CHANGE: 1189 case USB_CDC_NOTIFY_SPEED_CHANGE:
1116 if (urb->actual_length < 1190 if (urb->actual_length < (sizeof(*event) +
1117 (sizeof(*event) + sizeof(struct connection_speed_change))) 1191 sizeof(struct usb_cdc_speed_change)))
1118 set_bit(EVENT_STS_SPLIT, &dev->flags); 1192 set_bit(EVENT_STS_SPLIT, &dev->flags);
1119 else 1193 else
1120 cdc_ncm_speed_change(ctx, 1194 cdc_ncm_speed_change(ctx,
1121 (struct connection_speed_change *) &event[1]); 1195 (struct usb_cdc_speed_change *) &event[1]);
1122 break; 1196 break;
1123 1197
1124 default: 1198 default:
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643a4a21..7dc84971f26f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
406 406
407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { 407 if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
408 err("Firmware too big: %zu", fw->size); 408 err("Firmware too big: %zu", fw->size);
409 release_firmware(fw);
409 return -ENOSPC; 410 return -ENOSPC;
410 } 411 }
411 data_len = fw->size; 412 data_len = fw->size;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e410d1b..82dba5aaf423 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
446 } 446 }
447} 447}
448 448
449static void virtnet_napi_enable(struct virtnet_info *vi)
450{
451 napi_enable(&vi->napi);
452
453 /* If all buffers were filled by other side before we napi_enabled, we
454 * won't get another interrupt, so process any outstanding packets
455 * now. virtnet_poll wants re-enable the queue, so we disable here.
456 * We synchronize against interrupts via NAPI_STATE_SCHED */
457 if (napi_schedule_prep(&vi->napi)) {
458 virtqueue_disable_cb(vi->rvq);
459 __napi_schedule(&vi->napi);
460 }
461}
462
449static void refill_work(struct work_struct *work) 463static void refill_work(struct work_struct *work)
450{ 464{
451 struct virtnet_info *vi; 465 struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
454 vi = container_of(work, struct virtnet_info, refill.work); 468 vi = container_of(work, struct virtnet_info, refill.work);
455 napi_disable(&vi->napi); 469 napi_disable(&vi->napi);
456 still_empty = !try_fill_recv(vi, GFP_KERNEL); 470 still_empty = !try_fill_recv(vi, GFP_KERNEL);
457 napi_enable(&vi->napi); 471 virtnet_napi_enable(vi);
458 472
459 /* In theory, this can happen: if we don't get any buffers in 473 /* In theory, this can happen: if we don't get any buffers in
460 * we will *never* try to fill again. */ 474 * we will *never* try to fill again. */
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
638{ 652{
639 struct virtnet_info *vi = netdev_priv(dev); 653 struct virtnet_info *vi = netdev_priv(dev);
640 654
641 napi_enable(&vi->napi); 655 virtnet_napi_enable(vi);
642
643 /* If all buffers were filled by other side before we napi_enabled, we
644 * won't get another interrupt, so process any outstanding packets
645 * now. virtnet_poll wants re-enable the queue, so we disable here.
646 * We synchronize against interrupts via NAPI_STATE_SCHED */
647 if (napi_schedule_prep(&vi->napi)) {
648 virtqueue_disable_cb(vi->rvq);
649 __napi_schedule(&vi->napi);
650 }
651 return 0; 656 return 0;
652} 657}
653 658
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b72b5b..cc14b4a75048 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
48static int enable_mq = 1; 48static int enable_mq = 1;
49static int irq_share_mode; 49static int irq_share_mode;
50 50
51static void
52vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
53
51/* 54/*
52 * Enable/Disable the given intr 55 * Enable/Disable the given intr
53 */ 56 */
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
139{ 142{
140 u32 ret; 143 u32 ret;
141 int i; 144 int i;
145 unsigned long flags;
142 146
147 spin_lock_irqsave(&adapter->cmd_lock, flags);
143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 148 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
144 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 149 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
150 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
151
145 adapter->link_speed = ret >> 16; 152 adapter->link_speed = ret >> 16;
146 if (ret & 1) { /* Link is up. */ 153 if (ret & 1) { /* Link is up. */
147 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", 154 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
183 190
184 /* Check if there is an error on xmit/recv queues */ 191 /* Check if there is an error on xmit/recv queues */
185 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 192 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
193 spin_lock(&adapter->cmd_lock);
186 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 194 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
187 VMXNET3_CMD_GET_QUEUE_STATUS); 195 VMXNET3_CMD_GET_QUEUE_STATUS);
196 spin_unlock(&adapter->cmd_lock);
188 197
189 for (i = 0; i < adapter->num_tx_queues; i++) 198 for (i = 0; i < adapter->num_tx_queues; i++)
190 if (adapter->tqd_start[i].status.stopped) 199 if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
804 skb_transport_header(skb))->doff * 4; 813 skb_transport_header(skb))->doff * 4;
805 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; 814 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
806 } else { 815 } else {
807 unsigned int pull_size;
808
809 if (skb->ip_summed == CHECKSUM_PARTIAL) { 816 if (skb->ip_summed == CHECKSUM_PARTIAL) {
810 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); 817 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
811 818
812 if (ctx->ipv4) { 819 if (ctx->ipv4) {
813 struct iphdr *iph = (struct iphdr *) 820 struct iphdr *iph = (struct iphdr *)
814 skb_network_header(skb); 821 skb_network_header(skb);
815 if (iph->protocol == IPPROTO_TCP) { 822 if (iph->protocol == IPPROTO_TCP)
816 pull_size = ctx->eth_ip_hdr_size +
817 sizeof(struct tcphdr);
818
819 if (unlikely(!pskb_may_pull(skb,
820 pull_size))) {
821 goto err;
822 }
823 ctx->l4_hdr_size = ((struct tcphdr *) 823 ctx->l4_hdr_size = ((struct tcphdr *)
824 skb_transport_header(skb))->doff * 4; 824 skb_transport_header(skb))->doff * 4;
825 } else if (iph->protocol == IPPROTO_UDP) { 825 else if (iph->protocol == IPPROTO_UDP)
826 /*
827 * Use tcp header size so that bytes to
828 * be copied are more than required by
829 * the device.
830 */
826 ctx->l4_hdr_size = 831 ctx->l4_hdr_size =
827 sizeof(struct udphdr); 832 sizeof(struct tcphdr);
828 } else { 833 else
829 ctx->l4_hdr_size = 0; 834 ctx->l4_hdr_size = 0;
830 }
831 } else { 835 } else {
832 /* for simplicity, don't copy L4 headers */ 836 /* for simplicity, don't copy L4 headers */
833 ctx->l4_hdr_size = 0; 837 ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1859 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1863 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1860 struct Vmxnet3_DriverShared *shared = adapter->shared; 1864 struct Vmxnet3_DriverShared *shared = adapter->shared;
1861 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1865 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1866 unsigned long flags;
1862 1867
1863 if (grp) { 1868 if (grp) {
1864 /* add vlan rx stripping. */ 1869 /* add vlan rx stripping. */
1865 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { 1870 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
1866 int i; 1871 int i;
1867 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1868 adapter->vlan_grp = grp; 1872 adapter->vlan_grp = grp;
1869 1873
1870 /* update FEATURES to device */
1871 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1872 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1873 VMXNET3_CMD_UPDATE_FEATURE);
1874 /* 1874 /*
1875 * Clear entire vfTable; then enable untagged pkts. 1875 * Clear entire vfTable; then enable untagged pkts.
1876 * Note: setting one entry in vfTable to non-zero turns 1876 * Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1880 vfTable[i] = 0; 1880 vfTable[i] = 0;
1881 1881
1882 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); 1882 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1883 spin_lock_irqsave(&adapter->cmd_lock, flags);
1883 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1884 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1884 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1885 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1886 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1885 } else { 1887 } else {
1886 printk(KERN_ERR "%s: vlan_rx_register when device has " 1888 printk(KERN_ERR "%s: vlan_rx_register when device has "
1887 "no NETIF_F_HW_VLAN_RX\n", netdev->name); 1889 "no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1900 */ 1902 */
1901 vfTable[i] = 0; 1903 vfTable[i] = 0;
1902 } 1904 }
1905 spin_lock_irqsave(&adapter->cmd_lock, flags);
1903 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1906 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1904 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1907 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1905 1908 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1906 /* update FEATURES to device */
1907 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1908 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1909 VMXNET3_CMD_UPDATE_FEATURE);
1910 } 1909 }
1911 } 1910 }
1912} 1911}
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1939{ 1938{
1940 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1939 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1941 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1940 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1941 unsigned long flags;
1942 1942
1943 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 1943 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1944 spin_lock_irqsave(&adapter->cmd_lock, flags);
1944 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1945 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1945 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1946 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1947 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1946} 1948}
1947 1949
1948 1950
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1951{ 1953{
1952 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1954 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1953 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1955 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1956 unsigned long flags;
1954 1957
1955 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); 1958 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1959 spin_lock_irqsave(&adapter->cmd_lock, flags);
1956 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1960 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1957 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1961 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1962 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1958} 1963}
1959 1964
1960 1965
@@ -1985,6 +1990,7 @@ static void
1985vmxnet3_set_mc(struct net_device *netdev) 1990vmxnet3_set_mc(struct net_device *netdev)
1986{ 1991{
1987 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1992 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1993 unsigned long flags;
1988 struct Vmxnet3_RxFilterConf *rxConf = 1994 struct Vmxnet3_RxFilterConf *rxConf =
1989 &adapter->shared->devRead.rxFilterConf; 1995 &adapter->shared->devRead.rxFilterConf;
1990 u8 *new_table = NULL; 1996 u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2020 rxConf->mfTablePA = 0; 2026 rxConf->mfTablePA = 0;
2021 } 2027 }
2022 2028
2029 spin_lock_irqsave(&adapter->cmd_lock, flags);
2023 if (new_mode != rxConf->rxMode) { 2030 if (new_mode != rxConf->rxMode) {
2024 rxConf->rxMode = cpu_to_le32(new_mode); 2031 rxConf->rxMode = cpu_to_le32(new_mode);
2025 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2032 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev)
2028 2035
2029 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2036 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2030 VMXNET3_CMD_UPDATE_MAC_FILTERS); 2037 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2038 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2031 2039
2032 kfree(new_table); 2040 kfree(new_table);
2033} 2041}
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2080 devRead->misc.uptFeatures |= UPT1_F_LRO; 2088 devRead->misc.uptFeatures |= UPT1_F_LRO;
2081 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); 2089 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2082 } 2090 }
2083 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && 2091 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
2084 adapter->vlan_grp) {
2085 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 2092 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2086 }
2087 2093
2088 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2094 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2089 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2095 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2168 /* rx filter settings */ 2174 /* rx filter settings */
2169 devRead->rxFilterConf.rxMode = 0; 2175 devRead->rxFilterConf.rxMode = 0;
2170 vmxnet3_restore_vlan(adapter); 2176 vmxnet3_restore_vlan(adapter);
2177 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2178
2171 /* the rest are already zeroed */ 2179 /* the rest are already zeroed */
2172} 2180}
2173 2181
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2177{ 2185{
2178 int err, i; 2186 int err, i;
2179 u32 ret; 2187 u32 ret;
2188 unsigned long flags;
2180 2189
2181 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," 2190 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2182 " ring sizes %u %u %u\n", adapter->netdev->name, 2191 " ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2206 adapter->shared_pa)); 2215 adapter->shared_pa));
2207 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( 2216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2208 adapter->shared_pa)); 2217 adapter->shared_pa));
2218 spin_lock_irqsave(&adapter->cmd_lock, flags);
2209 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2219 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2210 VMXNET3_CMD_ACTIVATE_DEV); 2220 VMXNET3_CMD_ACTIVATE_DEV);
2211 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2221 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2222 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2212 2223
2213 if (ret != 0) { 2224 if (ret != 0) {
2214 printk(KERN_ERR "Failed to activate dev %s: error %u\n", 2225 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@ rq_err:
2255void 2266void
2256vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) 2267vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2257{ 2268{
2269 unsigned long flags;
2270 spin_lock_irqsave(&adapter->cmd_lock, flags);
2258 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); 2271 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2272 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2259} 2273}
2260 2274
2261 2275
@@ -2263,12 +2277,15 @@ int
2263vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2277vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2264{ 2278{
2265 int i; 2279 int i;
2280 unsigned long flags;
2266 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2281 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2267 return 0; 2282 return 0;
2268 2283
2269 2284
2285 spin_lock_irqsave(&adapter->cmd_lock, flags);
2270 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2286 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2271 VMXNET3_CMD_QUIESCE_DEV); 2287 VMXNET3_CMD_QUIESCE_DEV);
2288 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2272 vmxnet3_disable_all_intrs(adapter); 2289 vmxnet3_disable_all_intrs(adapter);
2273 2290
2274 for (i = 0; i < adapter->num_rx_queues; i++) 2291 for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2426 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 2443 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2427 ring0_size = adapter->rx_queue[0].rx_ring[0].size; 2444 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2428 ring0_size = (ring0_size + sz - 1) / sz * sz; 2445 ring0_size = (ring0_size + sz - 1) / sz * sz;
2429 ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE / 2446 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2430 sz * sz); 2447 sz * sz);
2431 ring1_size = adapter->rx_queue[0].rx_ring[1].size; 2448 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2432 comp_size = ring0_size + ring1_size; 2449 comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2695 break; 2712 break;
2696 } else { 2713 } else {
2697 /* If fails to enable required number of MSI-x vectors 2714 /* If fails to enable required number of MSI-x vectors
2698 * try enabling 3 of them. One each for rx, tx and event 2715 * try enabling minimum number of vectors required.
2699 */ 2716 */
2700 vectors = vector_threshold; 2717 vectors = vector_threshold;
2701 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" 2718 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2718 u32 cfg; 2735 u32 cfg;
2719 2736
2720 /* intr settings */ 2737 /* intr settings */
2738 spin_lock(&adapter->cmd_lock);
2721 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2739 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2722 VMXNET3_CMD_GET_CONF_INTR); 2740 VMXNET3_CMD_GET_CONF_INTR);
2723 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2741 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2742 spin_unlock(&adapter->cmd_lock);
2724 adapter->intr.type = cfg & 0x3; 2743 adapter->intr.type = cfg & 0x3;
2725 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2744 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2726 2745
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2755 */ 2774 */
2756 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { 2775 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2757 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE 2776 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2758 || adapter->num_rx_queues != 2) { 2777 || adapter->num_rx_queues != 1) {
2759 adapter->share_intr = VMXNET3_INTR_TXSHARE; 2778 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2760 printk(KERN_ERR "Number of rx queues : 1\n"); 2779 printk(KERN_ERR "Number of rx queues : 1\n");
2761 adapter->num_rx_queues = 1; 2780 adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2905 adapter->netdev = netdev; 2924 adapter->netdev = netdev;
2906 adapter->pdev = pdev; 2925 adapter->pdev = pdev;
2907 2926
2927 spin_lock_init(&adapter->cmd_lock);
2908 adapter->shared = pci_alloc_consistent(adapter->pdev, 2928 adapter->shared = pci_alloc_consistent(adapter->pdev,
2909 sizeof(struct Vmxnet3_DriverShared), 2929 sizeof(struct Vmxnet3_DriverShared),
2910 &adapter->shared_pa); 2930 &adapter->shared_pa);
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device)
3108 u8 *arpreq; 3128 u8 *arpreq;
3109 struct in_device *in_dev; 3129 struct in_device *in_dev;
3110 struct in_ifaddr *ifa; 3130 struct in_ifaddr *ifa;
3131 unsigned long flags;
3111 int i = 0; 3132 int i = 0;
3112 3133
3113 if (!netif_running(netdev)) 3134 if (!netif_running(netdev))
3114 return 0; 3135 return 0;
3115 3136
3137 for (i = 0; i < adapter->num_rx_queues; i++)
3138 napi_disable(&adapter->rx_queue[i].napi);
3139
3116 vmxnet3_disable_all_intrs(adapter); 3140 vmxnet3_disable_all_intrs(adapter);
3117 vmxnet3_free_irqs(adapter); 3141 vmxnet3_free_irqs(adapter);
3118 vmxnet3_free_intr_resources(adapter); 3142 vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@ skip_arp:
3188 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3212 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3189 pmConf)); 3213 pmConf));
3190 3214
3215 spin_lock_irqsave(&adapter->cmd_lock, flags);
3191 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3192 VMXNET3_CMD_UPDATE_PMCFG); 3217 VMXNET3_CMD_UPDATE_PMCFG);
3218 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3193 3219
3194 pci_save_state(pdev); 3220 pci_save_state(pdev);
3195 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), 3221 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@ skip_arp:
3204static int 3230static int
3205vmxnet3_resume(struct device *device) 3231vmxnet3_resume(struct device *device)
3206{ 3232{
3207 int err; 3233 int err, i = 0;
3234 unsigned long flags;
3208 struct pci_dev *pdev = to_pci_dev(device); 3235 struct pci_dev *pdev = to_pci_dev(device);
3209 struct net_device *netdev = pci_get_drvdata(pdev); 3236 struct net_device *netdev = pci_get_drvdata(pdev);
3210 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3237 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device)
3232 3259
3233 pci_enable_wake(pdev, PCI_D0, 0); 3260 pci_enable_wake(pdev, PCI_D0, 0);
3234 3261
3262 spin_lock_irqsave(&adapter->cmd_lock, flags);
3235 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3263 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3236 VMXNET3_CMD_UPDATE_PMCFG); 3264 VMXNET3_CMD_UPDATE_PMCFG);
3265 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3237 vmxnet3_alloc_intr_resources(adapter); 3266 vmxnet3_alloc_intr_resources(adapter);
3238 vmxnet3_request_irqs(adapter); 3267 vmxnet3_request_irqs(adapter);
3268 for (i = 0; i < adapter->num_rx_queues; i++)
3269 napi_enable(&adapter->rx_queue[i].napi);
3239 vmxnet3_enable_all_intrs(adapter); 3270 vmxnet3_enable_all_intrs(adapter);
3240 3271
3241 return 0; 3272 return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe7..81254be85b92 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) 45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
46{ 46{
47 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 47 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
48 unsigned long flags;
48 49
49 if (adapter->rxcsum != val) { 50 if (adapter->rxcsum != val) {
50 adapter->rxcsum = val; 51 adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
56 adapter->shared->devRead.misc.uptFeatures &= 57 adapter->shared->devRead.misc.uptFeatures &=
57 ~UPT1_F_RXCSUM; 58 ~UPT1_F_RXCSUM;
58 59
60 spin_lock_irqsave(&adapter->cmd_lock, flags);
59 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 61 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
60 VMXNET3_CMD_UPDATE_FEATURE); 62 VMXNET3_CMD_UPDATE_FEATURE);
63 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
61 } 64 }
62 } 65 }
63 return 0; 66 return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
68static const struct vmxnet3_stat_desc 71static const struct vmxnet3_stat_desc
69vmxnet3_tq_dev_stats[] = { 72vmxnet3_tq_dev_stats[] = {
70 /* description, offset */ 73 /* description, offset */
71 { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, 74 { "Tx Queue#", 0 },
72 { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, 75 { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
73 { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, 76 { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
74 { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, 77 { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
75 { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, 78 { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
76 { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, 79 { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
77 { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, 80 { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
78 { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, 81 { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
79 { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, 82 { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
80 { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, 83 { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
84 { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
81}; 85};
82 86
83/* per tq stats maintained by the driver */ 87/* per tq stats maintained by the driver */
84static const struct vmxnet3_stat_desc 88static const struct vmxnet3_stat_desc
85vmxnet3_tq_driver_stats[] = { 89vmxnet3_tq_driver_stats[] = {
86 /* description, offset */ 90 /* description, offset */
87 {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, 91 {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
88 drop_total) }, 92 drop_total) },
89 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, 93 { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
90 drop_too_many_frags) }, 94 drop_too_many_frags) },
91 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, 95 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
92 drop_oversized_hdr) }, 96 drop_oversized_hdr) },
93 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, 97 { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
94 drop_hdr_inspect_err) }, 98 drop_hdr_inspect_err) },
95 { " tso", offsetof(struct vmxnet3_tq_driver_stats, 99 { " tso", offsetof(struct vmxnet3_tq_driver_stats,
96 drop_tso) }, 100 drop_tso) },
97 { "ring full", offsetof(struct vmxnet3_tq_driver_stats, 101 { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
98 tx_ring_full) }, 102 tx_ring_full) },
99 { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, 103 { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
100 linearized) }, 104 linearized) },
101 { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, 105 { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
102 copy_skb_header) }, 106 copy_skb_header) },
103 { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, 107 { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
104 oversized_hdr) }, 108 oversized_hdr) },
105}; 109};
106 110
107/* per rq stats maintained by the device */ 111/* per rq stats maintained by the device */
108static const struct vmxnet3_stat_desc 112static const struct vmxnet3_stat_desc
109vmxnet3_rq_dev_stats[] = { 113vmxnet3_rq_dev_stats[] = {
110 { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, 114 { "Rx Queue#", 0 },
111 { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, 115 { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
112 { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, 116 { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
113 { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, 117 { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
114 { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, 118 { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
115 { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, 119 { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
116 { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, 120 { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
117 { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, 121 { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
118 { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, 122 { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
119 { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, 123 { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
124 { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
120}; 125};
121 126
122/* per rq stats maintained by the driver */ 127/* per rq stats maintained by the driver */
123static const struct vmxnet3_stat_desc 128static const struct vmxnet3_stat_desc
124vmxnet3_rq_driver_stats[] = { 129vmxnet3_rq_driver_stats[] = {
125 /* description, offset */ 130 /* description, offset */
126 { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, 131 { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
127 drop_total) }, 132 drop_total) },
128 { " err", offsetof(struct vmxnet3_rq_driver_stats, 133 { " err", offsetof(struct vmxnet3_rq_driver_stats,
129 drop_err) }, 134 drop_err) },
130 { " fcs", offsetof(struct vmxnet3_rq_driver_stats, 135 { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
131 drop_fcs) }, 136 drop_fcs) },
132 { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, 137 { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
133 rx_buf_alloc_failure) }, 138 rx_buf_alloc_failure) },
134}; 139};
135 140
136/* gloabl stats maintained by the driver */ 141/* gloabl stats maintained by the driver */
137static const struct vmxnet3_stat_desc 142static const struct vmxnet3_stat_desc
138vmxnet3_global_stats[] = { 143vmxnet3_global_stats[] = {
139 /* description, offset */ 144 /* description, offset */
140 { "tx timeout count", offsetof(struct vmxnet3_adapter, 145 { "tx timeout count", offsetof(struct vmxnet3_adapter,
141 tx_timeout_count) } 146 tx_timeout_count) }
142}; 147};
143 148
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
151 struct UPT1_TxStats *devTxStats; 156 struct UPT1_TxStats *devTxStats;
152 struct UPT1_RxStats *devRxStats; 157 struct UPT1_RxStats *devRxStats;
153 struct net_device_stats *net_stats = &netdev->stats; 158 struct net_device_stats *net_stats = &netdev->stats;
159 unsigned long flags;
154 int i; 160 int i;
155 161
156 adapter = netdev_priv(netdev); 162 adapter = netdev_priv(netdev);
157 163
158 /* Collect the dev stats into the shared area */ 164 /* Collect the dev stats into the shared area */
165 spin_lock_irqsave(&adapter->cmd_lock, flags);
159 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 166 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
167 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
160 168
161 memset(net_stats, 0, sizeof(*net_stats)); 169 memset(net_stats, 0, sizeof(*net_stats));
162 for (i = 0; i < adapter->num_tx_queues; i++) { 170 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
193static int 201static int
194vmxnet3_get_sset_count(struct net_device *netdev, int sset) 202vmxnet3_get_sset_count(struct net_device *netdev, int sset)
195{ 203{
204 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
196 switch (sset) { 205 switch (sset) {
197 case ETH_SS_STATS: 206 case ETH_SS_STATS:
198 return ARRAY_SIZE(vmxnet3_tq_dev_stats) + 207 return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
199 ARRAY_SIZE(vmxnet3_tq_driver_stats) + 208 ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
200 ARRAY_SIZE(vmxnet3_rq_dev_stats) + 209 adapter->num_tx_queues +
201 ARRAY_SIZE(vmxnet3_rq_driver_stats) + 210 (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
211 ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
212 adapter->num_rx_queues +
202 ARRAY_SIZE(vmxnet3_global_stats); 213 ARRAY_SIZE(vmxnet3_global_stats);
203 default: 214 default:
204 return -EOPNOTSUPP; 215 return -EOPNOTSUPP;
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
206} 217}
207 218
208 219
220/* Should be multiple of 4 */
221#define NUM_TX_REGS 8
222#define NUM_RX_REGS 12
223
209static int 224static int
210vmxnet3_get_regs_len(struct net_device *netdev) 225vmxnet3_get_regs_len(struct net_device *netdev)
211{ 226{
212 return 20 * sizeof(u32); 227 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
228 return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
229 adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
213} 230}
214 231
215 232
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
240static void 257static void
241vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) 258vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
242{ 259{
260 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
243 if (stringset == ETH_SS_STATS) { 261 if (stringset == ETH_SS_STATS) {
244 int i; 262 int i, j;
245 263 for (j = 0; j < adapter->num_tx_queues; j++) {
246 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { 264 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
247 memcpy(buf, vmxnet3_tq_dev_stats[i].desc, 265 memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
248 ETH_GSTRING_LEN); 266 ETH_GSTRING_LEN);
249 buf += ETH_GSTRING_LEN; 267 buf += ETH_GSTRING_LEN;
250 } 268 }
251 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { 269 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
252 memcpy(buf, vmxnet3_tq_driver_stats[i].desc, 270 i++) {
253 ETH_GSTRING_LEN); 271 memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
254 buf += ETH_GSTRING_LEN; 272 ETH_GSTRING_LEN);
255 } 273 buf += ETH_GSTRING_LEN;
256 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { 274 }
257 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
258 ETH_GSTRING_LEN);
259 buf += ETH_GSTRING_LEN;
260 } 275 }
261 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { 276
262 memcpy(buf, vmxnet3_rq_driver_stats[i].desc, 277 for (j = 0; j < adapter->num_rx_queues; j++) {
263 ETH_GSTRING_LEN); 278 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
264 buf += ETH_GSTRING_LEN; 279 memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
280 ETH_GSTRING_LEN);
281 buf += ETH_GSTRING_LEN;
282 }
283 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
284 i++) {
285 memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
286 ETH_GSTRING_LEN);
287 buf += ETH_GSTRING_LEN;
288 }
265 } 289 }
290
266 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { 291 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
267 memcpy(buf, vmxnet3_global_stats[i].desc, 292 memcpy(buf, vmxnet3_global_stats[i].desc,
268 ETH_GSTRING_LEN); 293 ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
277 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 302 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
278 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; 303 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
279 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; 304 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
305 unsigned long flags;
280 306
281 if (data & ~ETH_FLAG_LRO) 307 if (data & ~ETH_FLAG_LRO)
282 return -EOPNOTSUPP; 308 return -EOPNOTSUPP;
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
292 else 318 else
293 adapter->shared->devRead.misc.uptFeatures &= 319 adapter->shared->devRead.misc.uptFeatures &=
294 ~UPT1_F_LRO; 320 ~UPT1_F_LRO;
321 spin_lock_irqsave(&adapter->cmd_lock, flags);
295 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 322 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
296 VMXNET3_CMD_UPDATE_FEATURE); 323 VMXNET3_CMD_UPDATE_FEATURE);
324 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
297 } 325 }
298 return 0; 326 return 0;
299} 327}
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
303 struct ethtool_stats *stats, u64 *buf) 331 struct ethtool_stats *stats, u64 *buf)
304{ 332{
305 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 333 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
334 unsigned long flags;
306 u8 *base; 335 u8 *base;
307 int i; 336 int i;
308 int j = 0; 337 int j = 0;
309 338
339 spin_lock_irqsave(&adapter->cmd_lock, flags);
310 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 340 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
341 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
311 342
312 /* this does assume each counter is 64-bit wide */ 343 /* this does assume each counter is 64-bit wide */
313/* TODO change this for multiple queues */ 344 for (j = 0; j < adapter->num_tx_queues; j++) {
314 345 base = (u8 *)&adapter->tqd_start[j].stats;
315 base = (u8 *)&adapter->tqd_start[j].stats; 346 *buf++ = (u64)j;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 347 for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); 348 *buf++ = *(u64 *)(base +
318 349 vmxnet3_tq_dev_stats[i].offset);
319 base = (u8 *)&adapter->tx_queue[j].stats; 350
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 351 base = (u8 *)&adapter->tx_queue[j].stats;
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); 352 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
322 353 *buf++ = *(u64 *)(base +
323 base = (u8 *)&adapter->rqd_start[j].stats; 354 vmxnet3_tq_driver_stats[i].offset);
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 355 }
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326 356
327 base = (u8 *)&adapter->rx_queue[j].stats; 357 for (j = 0; j < adapter->num_tx_queues; j++) {
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 358 base = (u8 *)&adapter->rqd_start[j].stats;
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); 359 *buf++ = (u64) j;
360 for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
361 *buf++ = *(u64 *)(base +
362 vmxnet3_rq_dev_stats[i].offset);
363
364 base = (u8 *)&adapter->rx_queue[j].stats;
365 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
366 *buf++ = *(u64 *)(base +
367 vmxnet3_rq_driver_stats[i].offset);
368 }
330 369
331 base = (u8 *)adapter; 370 base = (u8 *)adapter;
332 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) 371 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{ 378{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 379 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p; 380 u32 *buf = p;
342 int i = 0; 381 int i = 0, j = 0;
343 382
344 memset(p, 0, vmxnet3_get_regs_len(netdev)); 383 memset(p, 0, vmxnet3_get_regs_len(netdev));
345 384
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
348 /* Update vmxnet3_get_regs_len if we want to dump more registers */ 387 /* Update vmxnet3_get_regs_len if we want to dump more registers */
349 388
350 /* make each ring use multiple of 16 bytes */ 389 /* make each ring use multiple of 16 bytes */
351/* TODO change this for multiple queues */ 390 for (i = 0; i < adapter->num_tx_queues; i++) {
352 buf[0] = adapter->tx_queue[i].tx_ring.next2fill; 391 buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
353 buf[1] = adapter->tx_queue[i].tx_ring.next2comp; 392 buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
354 buf[2] = adapter->tx_queue[i].tx_ring.gen; 393 buf[j++] = adapter->tx_queue[i].tx_ring.gen;
355 buf[3] = 0; 394 buf[j++] = 0;
356 395
357 buf[4] = adapter->tx_queue[i].comp_ring.next2proc; 396 buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
358 buf[5] = adapter->tx_queue[i].comp_ring.gen; 397 buf[j++] = adapter->tx_queue[i].comp_ring.gen;
359 buf[6] = adapter->tx_queue[i].stopped; 398 buf[j++] = adapter->tx_queue[i].stopped;
360 buf[7] = 0; 399 buf[j++] = 0;
361 400 }
362 buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill; 401
363 buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp; 402 for (i = 0; i < adapter->num_rx_queues; i++) {
364 buf[10] = adapter->rx_queue[i].rx_ring[0].gen; 403 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
365 buf[11] = 0; 404 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
366 405 buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
367 buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill; 406 buf[j++] = 0;
368 buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp; 407
369 buf[14] = adapter->rx_queue[i].rx_ring[1].gen; 408 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
370 buf[15] = 0; 409 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
371 410 buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
372 buf[16] = adapter->rx_queue[i].comp_ring.next2proc; 411 buf[j++] = 0;
373 buf[17] = adapter->rx_queue[i].comp_ring.gen; 412
374 buf[18] = 0; 413 buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
375 buf[19] = 0; 414 buf[j++] = adapter->rx_queue[i].comp_ring.gen;
415 buf[j++] = 0;
416 buf[j++] = 0;
417 }
418
376} 419}
377 420
378 421
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
574 const struct ethtool_rxfh_indir *p) 617 const struct ethtool_rxfh_indir *p)
575{ 618{
576 unsigned int i; 619 unsigned int i;
620 unsigned long flags;
577 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 621 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
578 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 622 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
579 623
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
592 for (i = 0; i < rssConf->indTableSize; i++) 636 for (i = 0; i < rssConf->indTableSize; i++)
593 rssConf->indTable[i] = p->ring_index[i]; 637 rssConf->indTable[i] = p->ring_index[i];
594 638
639 spin_lock_irqsave(&adapter->cmd_lock, flags);
595 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 640 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
596 VMXNET3_CMD_UPDATE_RSSIDT); 641 VMXNET3_CMD_UPDATE_RSSIDT);
642 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
597 643
598 return 0; 644 return 0;
599 645
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed37f03..fb5d245ac878 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
72 72
73/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 73/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01001000 74#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
75 75
76#if defined(CONFIG_PCI_MSI) 76#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */ 77 /* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue {
289 289
290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ 290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1) 291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
292#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */ 292#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
293 293
294 294
295struct vmxnet3_intr { 295struct vmxnet3_intr {
@@ -317,6 +317,7 @@ struct vmxnet3_adapter {
317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; 317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
318 struct vlan_group *vlan_grp; 318 struct vlan_group *vlan_grp;
319 struct vmxnet3_intr intr; 319 struct vmxnet3_intr intr;
320 spinlock_t cmd_lock;
320 struct Vmxnet3_DriverShared *shared; 321 struct Vmxnet3_DriverShared *shared;
321 struct Vmxnet3_PMConf *pm_conf; 322 struct Vmxnet3_PMConf *pm_conf;
322 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ 323 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f53e2f9..228d4f7a58af 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -3690,7 +3690,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3690 if (status != VXGE_HW_OK) 3690 if (status != VXGE_HW_OK)
3691 goto exit; 3691 goto exit;
3692 3692
3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || 3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3694 (rts_table != 3694 (rts_table !=
3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) 3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3696 *data1 = 0; 3696 *data1 = 0;
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 1ac9b568f1b0..c81a6512c683 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4120,6 +4120,7 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4120 "hotplug event.\n"); 4120 "hotplug event.\n");
4121 4121
4122out: 4122out:
4123 release_firmware(fw);
4123 return ret; 4124 return ret;
4124} 4125}
4125 4126
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 019a74d533a6..09ae4ef0fd51 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2294,6 +2294,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2294 int i; 2294 int i;
2295 bool needreset = false; 2295 bool needreset = false;
2296 2296
2297 mutex_lock(&sc->lock);
2298
2297 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) { 2299 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
2298 if (sc->txqs[i].setup) { 2300 if (sc->txqs[i].setup) {
2299 txq = &sc->txqs[i]; 2301 txq = &sc->txqs[i];
@@ -2321,6 +2323,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2321 ath5k_reset(sc, NULL, true); 2323 ath5k_reset(sc, NULL, true);
2322 } 2324 }
2323 2325
2326 mutex_unlock(&sc->lock);
2327
2324 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2328 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2325 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); 2329 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2326} 2330}
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 0064be7ce5c9..21091c26a9a5 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah)
838 for (i = 0; i < qmax; i++) { 838 for (i = 0; i < qmax; i++) {
839 err = ath5k_hw_stop_tx_dma(ah, i); 839 err = ath5k_hw_stop_tx_dma(ah, i);
840 /* -EINVAL -> queue inactive */ 840 /* -EINVAL -> queue inactive */
841 if (err != -EINVAL) 841 if (err && err != -EINVAL)
842 return err; 842 return err;
843 } 843 }
844 844
845 return err; 845 return 0;
846} 846}
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index e5f2b96a4c63..a702817daf72 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
86 if (!ah->ah_bwmode) { 86 if (!ah->ah_bwmode) {
87 dur = ieee80211_generic_frame_duration(sc->hw, 87 dur = ieee80211_generic_frame_duration(sc->hw,
88 NULL, len, rate); 88 NULL, len, rate);
89 return dur; 89 return le16_to_cpu(dur);
90 } 90 }
91 91
92 bitrate = rate->bitrate; 92 bitrate = rate->bitrate;
@@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
265 * what rate we should choose to TX ACKs. */ 265 * what rate we should choose to TX ACKs. */
266 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); 266 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
267 267
268 tx_time = le16_to_cpu(tx_time);
269
270 ath5k_hw_reg_write(ah, tx_time, reg); 268 ath5k_hw_reg_write(ah, tx_time, reg);
271 269
272 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) 270 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 01880aa13e36..5e300bd3d264 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -679,10 +679,6 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
679 679
680 /* Do NF cal only at longer intervals */ 680 /* Do NF cal only at longer intervals */
681 if (longcal || nfcal_pending) { 681 if (longcal || nfcal_pending) {
682 /* Do periodic PAOffset Cal */
683 ar9002_hw_pa_cal(ah, false);
684 ar9002_hw_olc_temp_compensation(ah);
685
686 /* 682 /*
687 * Get the value from the previous NF cal and update 683 * Get the value from the previous NF cal and update
688 * history buffer. 684 * history buffer.
@@ -697,8 +693,12 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
697 ath9k_hw_loadnf(ah, ah->curchan); 693 ath9k_hw_loadnf(ah, ah->curchan);
698 } 694 }
699 695
700 if (longcal) 696 if (longcal) {
701 ath9k_hw_start_nfcal(ah, false); 697 ath9k_hw_start_nfcal(ah, false);
698 /* Do periodic PAOffset Cal */
699 ar9002_hw_pa_cal(ah, false);
700 ar9002_hw_olc_temp_compensation(ah);
701 }
702 } 702 }
703 703
704 return iscaldone; 704 return iscaldone;
@@ -954,6 +954,9 @@ static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
954 &adc_dc_cal_multi_sample; 954 &adc_dc_cal_multi_sample;
955 } 955 }
956 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; 956 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
957
958 if (AR_SREV_9287(ah))
959 ah->supp_cals &= ~ADC_GAIN_CAL;
957 } 960 }
958} 961}
959 962
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index f8a7771faee2..f44c84ab5dce 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
426 } 426 }
427 427
428 /* WAR for ASPM system hang */ 428 /* WAR for ASPM system hang */
429 if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { 429 if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
430 val |= (AR_WA_BIT6 | AR_WA_BIT7); 430 val |= (AR_WA_BIT6 | AR_WA_BIT7);
431 }
432 431
433 if (AR_SREV_9285E_20(ah)) 432 if (AR_SREV_9285E_20(ah))
434 val |= AR_WA_BIT23; 433 val |= AR_WA_BIT23;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 81f9cf294dec..9ecca93392e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1842,7 +1842,7 @@ static const u32 ar9300_2p2_soc_preamble[][2] = {
1842 1842
1843static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = { 1843static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
1844 /* Addr allmodes */ 1844 /* Addr allmodes */
1845 {0x00004040, 0x08212e5e}, 1845 {0x00004040, 0x0821265e},
1846 {0x00004040, 0x0008003b}, 1846 {0x00004040, 0x0008003b},
1847 {0x00004044, 0x00000000}, 1847 {0x00004044, 0x00000000},
1848}; 1848};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 6137634e46ca..06fb2c850535 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -146,8 +146,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
146 /* Sleep Setting */ 146 /* Sleep Setting */
147 147
148 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 148 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
149 ar9300PciePhy_clkreq_enable_L1_2p2, 149 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
150 ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2), 150 ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
151 2); 151 2);
152 152
153 /* Fast clock modal settings */ 153 /* Fast clock modal settings */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3681caf54282..23838e37d45f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -218,6 +218,7 @@ struct ath_frame_info {
218struct ath_buf_state { 218struct ath_buf_state {
219 u8 bf_type; 219 u8 bf_type;
220 u8 bfs_paprd; 220 u8 bfs_paprd;
221 unsigned long bfs_paprd_timestamp;
221 enum ath9k_internal_frame_type bfs_ftype; 222 enum ath9k_internal_frame_type bfs_ftype;
222}; 223};
223 224
@@ -593,7 +594,6 @@ struct ath_softc {
593 struct work_struct paprd_work; 594 struct work_struct paprd_work;
594 struct work_struct hw_check_work; 595 struct work_struct hw_check_work;
595 struct completion paprd_complete; 596 struct completion paprd_complete;
596 bool paprd_pending;
597 597
598 u32 intrstatus; 598 u32 intrstatus;
599 u32 sc_flags; /* SC_OP_* */ 599 u32 sc_flags; /* SC_OP_* */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 088f141f2006..749a93608664 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -226,6 +226,10 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
226 eep->baseEepHeader.pwdclkind == 0) 226 eep->baseEepHeader.pwdclkind == 0)
227 ah->need_an_top2_fixup = 1; 227 ah->need_an_top2_fixup = 1;
228 228
229 if ((common->bus_ops->ath_bus_type == ATH_USB) &&
230 (AR_SREV_9280(ah)))
231 eep->modalHeader[0].xpaBiasLvl = 0;
232
229 return 0; 233 return 0;
230} 234}
231 235
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index a099b3e87ed3..780ac5eac501 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -78,7 +78,7 @@ struct tx_frame_hdr {
78 u8 node_idx; 78 u8 node_idx;
79 u8 vif_idx; 79 u8 vif_idx;
80 u8 tidno; 80 u8 tidno;
81 u32 flags; /* ATH9K_HTC_TX_* */ 81 __be32 flags; /* ATH9K_HTC_TX_* */
82 u8 key_type; 82 u8 key_type;
83 u8 keyix; 83 u8 keyix;
84 u8 reserved[26]; 84 u8 reserved[26];
@@ -433,6 +433,7 @@ void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
433void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb, 433void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
434 enum htc_endpoint_id ep_id, bool txok); 434 enum htc_endpoint_id ep_id, bool txok);
435 435
436int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
436void ath9k_htc_station_work(struct work_struct *work); 437void ath9k_htc_station_work(struct work_struct *work);
437void ath9k_htc_aggr_work(struct work_struct *work); 438void ath9k_htc_aggr_work(struct work_struct *work);
438void ath9k_ani_work(struct work_struct *work);; 439void ath9k_ani_work(struct work_struct *work);;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 38433f9bfe59..0352f0994caa 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
142{ 142{
143 ath9k_htc_exit_debug(priv->ah); 143 ath9k_htc_exit_debug(priv->ah);
144 ath9k_hw_deinit(priv->ah); 144 ath9k_hw_deinit(priv->ah);
145 tasklet_kill(&priv->swba_tasklet);
146 tasklet_kill(&priv->rx_tasklet);
147 tasklet_kill(&priv->tx_tasklet);
148 kfree(priv->ah); 145 kfree(priv->ah);
149 priv->ah = NULL; 146 priv->ah = NULL;
150} 147}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 845b4c938d16..6bb59958f71e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -301,6 +301,16 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
301 301
302 priv->nstations++; 302 priv->nstations++;
303 303
304 /*
305 * Set chainmask etc. on the target.
306 */
307 ret = ath9k_htc_update_cap_target(priv);
308 if (ret)
309 ath_dbg(common, ATH_DBG_CONFIG,
310 "Failed to update capability in target\n");
311
312 priv->ah->is_monitoring = true;
313
304 return 0; 314 return 0;
305 315
306err_vif: 316err_vif:
@@ -328,6 +338,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
328 } 338 }
329 339
330 priv->nstations--; 340 priv->nstations--;
341 priv->ah->is_monitoring = false;
331 342
332 return 0; 343 return 0;
333} 344}
@@ -419,7 +430,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
419 return 0; 430 return 0;
420} 431}
421 432
422static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv) 433int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
423{ 434{
424 struct ath9k_htc_cap_target tcap; 435 struct ath9k_htc_cap_target tcap;
425 int ret; 436 int ret;
@@ -1014,12 +1025,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1014 int ret = 0; 1025 int ret = 0;
1015 u8 cmd_rsp; 1026 u8 cmd_rsp;
1016 1027
1017 /* Cancel all the running timers/work .. */
1018 cancel_work_sync(&priv->fatal_work);
1019 cancel_work_sync(&priv->ps_work);
1020 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1021 ath9k_led_stop_brightness(priv);
1022
1023 mutex_lock(&priv->mutex); 1028 mutex_lock(&priv->mutex);
1024 1029
1025 if (priv->op_flags & OP_INVALID) { 1030 if (priv->op_flags & OP_INVALID) {
@@ -1033,8 +1038,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1033 WMI_CMD(WMI_DISABLE_INTR_CMDID); 1038 WMI_CMD(WMI_DISABLE_INTR_CMDID);
1034 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 1039 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
1035 WMI_CMD(WMI_STOP_RECV_CMDID); 1040 WMI_CMD(WMI_STOP_RECV_CMDID);
1041
1042 tasklet_kill(&priv->swba_tasklet);
1043 tasklet_kill(&priv->rx_tasklet);
1044 tasklet_kill(&priv->tx_tasklet);
1045
1036 skb_queue_purge(&priv->tx_queue); 1046 skb_queue_purge(&priv->tx_queue);
1037 1047
1048 mutex_unlock(&priv->mutex);
1049
1050 /* Cancel all the running timers/work .. */
1051 cancel_work_sync(&priv->fatal_work);
1052 cancel_work_sync(&priv->ps_work);
1053 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1054 ath9k_led_stop_brightness(priv);
1055
1056 mutex_lock(&priv->mutex);
1057
1038 /* Remove monitor interface here */ 1058 /* Remove monitor interface here */
1039 if (ah->opmode == NL80211_IFTYPE_MONITOR) { 1059 if (ah->opmode == NL80211_IFTYPE_MONITOR) {
1040 if (ath9k_htc_remove_monitor_interface(priv)) 1060 if (ath9k_htc_remove_monitor_interface(priv))
@@ -1186,6 +1206,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1186 } 1206 }
1187 } 1207 }
1188 1208
1209 /*
1210 * Monitor interface should be added before
1211 * IEEE80211_CONF_CHANGE_CHANNEL is handled.
1212 */
1213 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1214 if (conf->flags & IEEE80211_CONF_MONITOR) {
1215 if (ath9k_htc_add_monitor_interface(priv))
1216 ath_err(common, "Failed to set monitor mode\n");
1217 else
1218 ath_dbg(common, ATH_DBG_CONFIG,
1219 "HW opmode set to Monitor mode\n");
1220 }
1221 }
1222
1189 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1223 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1190 struct ieee80211_channel *curchan = hw->conf.channel; 1224 struct ieee80211_channel *curchan = hw->conf.channel;
1191 int pos = curchan->hw_value; 1225 int pos = curchan->hw_value;
@@ -1221,16 +1255,6 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1221 ath_update_txpow(priv); 1255 ath_update_txpow(priv);
1222 } 1256 }
1223 1257
1224 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1225 if (conf->flags & IEEE80211_CONF_MONITOR) {
1226 if (ath9k_htc_add_monitor_interface(priv))
1227 ath_err(common, "Failed to set monitor mode\n");
1228 else
1229 ath_dbg(common, ATH_DBG_CONFIG,
1230 "HW opmode set to Monitor mode\n");
1231 }
1232 }
1233
1234 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1258 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1235 mutex_lock(&priv->htc_pm_lock); 1259 mutex_lock(&priv->htc_pm_lock);
1236 if (!priv->ps_idle) { 1260 if (!priv->ps_idle) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 33f36029fa4f..7a5ffca21958 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -113,6 +113,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
113 113
114 if (ieee80211_is_data(fc)) { 114 if (ieee80211_is_data(fc)) {
115 struct tx_frame_hdr tx_hdr; 115 struct tx_frame_hdr tx_hdr;
116 u32 flags = 0;
116 u8 *qc; 117 u8 *qc;
117 118
118 memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); 119 memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
@@ -136,13 +137,14 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
136 /* Check for RTS protection */ 137 /* Check for RTS protection */
137 if (priv->hw->wiphy->rts_threshold != (u32) -1) 138 if (priv->hw->wiphy->rts_threshold != (u32) -1)
138 if (skb->len > priv->hw->wiphy->rts_threshold) 139 if (skb->len > priv->hw->wiphy->rts_threshold)
139 tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS; 140 flags |= ATH9K_HTC_TX_RTSCTS;
140 141
141 /* CTS-to-self */ 142 /* CTS-to-self */
142 if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) && 143 if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
143 (priv->op_flags & OP_PROTECT_ENABLE)) 144 (priv->op_flags & OP_PROTECT_ENABLE))
144 tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY; 145 flags |= ATH9K_HTC_TX_CTSONLY;
145 146
147 tx_hdr.flags = cpu_to_be32(flags);
146 tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb); 148 tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
147 if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR) 149 if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
148 tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID; 150 tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fde978665e07..9f01e50d5cda 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -369,6 +369,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
369 else 369 else
370 ah->config.ht_enable = 0; 370 ah->config.ht_enable = 0;
371 371
372 /* PAPRD needs some more work to be enabled */
373 ah->config.paprd_disable = 1;
374
372 ah->config.rx_intr_mitigation = true; 375 ah->config.rx_intr_mitigation = true;
373 ah->config.pcieSerDesWrite = true; 376 ah->config.pcieSerDesWrite = true;
374 377
@@ -436,9 +439,10 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
436 439
437static int ath9k_hw_post_init(struct ath_hw *ah) 440static int ath9k_hw_post_init(struct ath_hw *ah)
438{ 441{
442 struct ath_common *common = ath9k_hw_common(ah);
439 int ecode; 443 int ecode;
440 444
441 if (!AR_SREV_9271(ah)) { 445 if (common->bus_ops->ath_bus_type != ATH_USB) {
442 if (!ath9k_hw_chip_test(ah)) 446 if (!ath9k_hw_chip_test(ah))
443 return -ENODEV; 447 return -ENODEV;
444 } 448 }
@@ -1213,7 +1217,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1213 ah->txchainmask = common->tx_chainmask; 1217 ah->txchainmask = common->tx_chainmask;
1214 ah->rxchainmask = common->rx_chainmask; 1218 ah->rxchainmask = common->rx_chainmask;
1215 1219
1216 if (!ah->chip_fullsleep) { 1220 if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
1217 ath9k_hw_abortpcurecv(ah); 1221 ath9k_hw_abortpcurecv(ah);
1218 if (!ath9k_hw_stopdmarecv(ah)) { 1222 if (!ath9k_hw_stopdmarecv(ah)) {
1219 ath_dbg(common, ATH_DBG_XMIT, 1223 ath_dbg(common, ATH_DBG_XMIT,
@@ -1932,7 +1936,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1932 pCap->rx_status_len = sizeof(struct ar9003_rxs); 1936 pCap->rx_status_len = sizeof(struct ar9003_rxs);
1933 pCap->tx_desc_len = sizeof(struct ar9003_txc); 1937 pCap->tx_desc_len = sizeof(struct ar9003_txc);
1934 pCap->txs_len = sizeof(struct ar9003_txs); 1938 pCap->txs_len = sizeof(struct ar9003_txs);
1935 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) 1939 if (!ah->config.paprd_disable &&
1940 ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
1936 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; 1941 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
1937 } else { 1942 } else {
1938 pCap->tx_desc_len = sizeof(struct ath_desc); 1943 pCap->tx_desc_len = sizeof(struct ath_desc);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 5a3dfec45e96..ea9fde670646 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -225,6 +225,7 @@ struct ath9k_ops_config {
225 u32 pcie_waen; 225 u32 pcie_waen;
226 u8 analog_shiftreg; 226 u8 analog_shiftreg;
227 u8 ht_enable; 227 u8 ht_enable;
228 u8 paprd_disable;
228 u32 ofdm_trig_low; 229 u32 ofdm_trig_low;
229 u32 ofdm_trig_high; 230 u32 ofdm_trig_high;
230 u32 cck_trig_high; 231 u32 cck_trig_high;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 767d8b86f1e1..087a6a95edd5 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -598,8 +598,6 @@ err_btcoex:
598err_queues: 598err_queues:
599 ath9k_hw_deinit(ah); 599 ath9k_hw_deinit(ah);
600err_hw: 600err_hw:
601 tasklet_kill(&sc->intr_tq);
602 tasklet_kill(&sc->bcon_tasklet);
603 601
604 kfree(ah); 602 kfree(ah);
605 sc->sc_ah = NULL; 603 sc->sc_ah = NULL;
@@ -807,9 +805,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
807 805
808 ath9k_hw_deinit(sc->sc_ah); 806 ath9k_hw_deinit(sc->sc_ah);
809 807
810 tasklet_kill(&sc->intr_tq);
811 tasklet_kill(&sc->bcon_tasklet);
812
813 kfree(sc->sc_ah); 808 kfree(sc->sc_ah);
814 sc->sc_ah = NULL; 809 sc->sc_ah = NULL;
815} 810}
@@ -824,6 +819,8 @@ void ath9k_deinit_device(struct ath_softc *sc)
824 wiphy_rfkill_stop_polling(sc->hw->wiphy); 819 wiphy_rfkill_stop_polling(sc->hw->wiphy);
825 ath_deinit_leds(sc); 820 ath_deinit_leds(sc);
826 821
822 ath9k_ps_restore(sc);
823
827 for (i = 0; i < sc->num_sec_wiphy; i++) { 824 for (i = 0; i < sc->num_sec_wiphy; i++) {
828 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 825 struct ath_wiphy *aphy = sc->sec_wiphy[i];
829 if (aphy == NULL) 826 if (aphy == NULL)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f90a6ca94a76..da5c64597c1f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -325,6 +325,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
325{ 325{
326 struct ieee80211_hw *hw = sc->hw; 326 struct ieee80211_hw *hw = sc->hw;
327 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 327 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
328 struct ath_hw *ah = sc->sc_ah;
329 struct ath_common *common = ath9k_hw_common(ah);
328 struct ath_tx_control txctl; 330 struct ath_tx_control txctl;
329 int time_left; 331 int time_left;
330 332
@@ -340,14 +342,16 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
340 tx_info->control.rates[1].idx = -1; 342 tx_info->control.rates[1].idx = -1;
341 343
342 init_completion(&sc->paprd_complete); 344 init_completion(&sc->paprd_complete);
343 sc->paprd_pending = true;
344 txctl.paprd = BIT(chain); 345 txctl.paprd = BIT(chain);
345 if (ath_tx_start(hw, skb, &txctl) != 0) 346
347 if (ath_tx_start(hw, skb, &txctl) != 0) {
348 ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
349 dev_kfree_skb_any(skb);
346 return false; 350 return false;
351 }
347 352
348 time_left = wait_for_completion_timeout(&sc->paprd_complete, 353 time_left = wait_for_completion_timeout(&sc->paprd_complete,
349 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); 354 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
350 sc->paprd_pending = false;
351 355
352 if (!time_left) 356 if (!time_left)
353 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, 357 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
@@ -592,14 +596,12 @@ void ath9k_tasklet(unsigned long data)
592 u32 status = sc->intrstatus; 596 u32 status = sc->intrstatus;
593 u32 rxmask; 597 u32 rxmask;
594 598
595 ath9k_ps_wakeup(sc);
596
597 if (status & ATH9K_INT_FATAL) { 599 if (status & ATH9K_INT_FATAL) {
598 ath_reset(sc, true); 600 ath_reset(sc, true);
599 ath9k_ps_restore(sc);
600 return; 601 return;
601 } 602 }
602 603
604 ath9k_ps_wakeup(sc);
603 spin_lock(&sc->sc_pcu_lock); 605 spin_lock(&sc->sc_pcu_lock);
604 606
605 if (!ath9k_hw_check_alive(ah)) 607 if (!ath9k_hw_check_alive(ah))
@@ -955,8 +957,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
955 957
956 spin_unlock_bh(&sc->sc_pcu_lock); 958 spin_unlock_bh(&sc->sc_pcu_lock);
957 ath9k_ps_restore(sc); 959 ath9k_ps_restore(sc);
958
959 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
960} 960}
961 961
962int ath_reset(struct ath_softc *sc, bool retry_tx) 962int ath_reset(struct ath_softc *sc, bool retry_tx)
@@ -969,6 +969,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
969 /* Stop ANI */ 969 /* Stop ANI */
970 del_timer_sync(&common->ani.timer); 970 del_timer_sync(&common->ani.timer);
971 971
972 ath9k_ps_wakeup(sc);
972 spin_lock_bh(&sc->sc_pcu_lock); 973 spin_lock_bh(&sc->sc_pcu_lock);
973 974
974 ieee80211_stop_queues(hw); 975 ieee80211_stop_queues(hw);
@@ -1015,6 +1016,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1015 1016
1016 /* Start ANI */ 1017 /* Start ANI */
1017 ath_start_ani(common); 1018 ath_start_ani(common);
1019 ath9k_ps_restore(sc);
1018 1020
1019 return r; 1021 return r;
1020} 1022}
@@ -1309,6 +1311,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1309 1311
1310 spin_lock_bh(&sc->sc_pcu_lock); 1312 spin_lock_bh(&sc->sc_pcu_lock);
1311 1313
1314 /* prevent tasklets to enable interrupts once we disable them */
1315 ah->imask &= ~ATH9K_INT_GLOBAL;
1316
1312 /* make sure h/w will not generate any interrupt 1317 /* make sure h/w will not generate any interrupt
1313 * before setting the invalid flag. */ 1318 * before setting the invalid flag. */
1314 ath9k_hw_disable_interrupts(ah); 1319 ath9k_hw_disable_interrupts(ah);
@@ -1326,6 +1331,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1326 1331
1327 spin_unlock_bh(&sc->sc_pcu_lock); 1332 spin_unlock_bh(&sc->sc_pcu_lock);
1328 1333
1334 /* we can now sync irq and kill any running tasklets, since we already
1335 * disabled interrupts and not holding a spin lock */
1336 synchronize_irq(sc->irq);
1337 tasklet_kill(&sc->intr_tq);
1338 tasklet_kill(&sc->bcon_tasklet);
1339
1329 ath9k_ps_restore(sc); 1340 ath9k_ps_restore(sc);
1330 1341
1331 sc->ps_idle = true; 1342 sc->ps_idle = true;
@@ -1701,7 +1712,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1701skip_chan_change: 1712skip_chan_change:
1702 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1713 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1703 sc->config.txpowlimit = 2 * conf->power_level; 1714 sc->config.txpowlimit = 2 * conf->power_level;
1715 ath9k_ps_wakeup(sc);
1704 ath_update_txpow(sc); 1716 ath_update_txpow(sc);
1717 ath9k_ps_restore(sc);
1705 } 1718 }
1706 1719
1707 spin_lock_bh(&sc->wiphy_lock); 1720 spin_lock_bh(&sc->wiphy_lock);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 332d1feb5c18..07b7804aec5b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1725 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, 1725 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1726 bf->bf_state.bfs_paprd); 1726 bf->bf_state.bfs_paprd);
1727 1727
1728 if (txctl->paprd)
1729 bf->bf_state.bfs_paprd_timestamp = jiffies;
1730
1728 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); 1731 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
1729 } 1732 }
1730 1733
@@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1886 bf->bf_buf_addr = 0; 1889 bf->bf_buf_addr = 0;
1887 1890
1888 if (bf->bf_state.bfs_paprd) { 1891 if (bf->bf_state.bfs_paprd) {
1889 if (!sc->paprd_pending) 1892 if (time_after(jiffies,
1893 bf->bf_state.bfs_paprd_timestamp +
1894 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
1890 dev_kfree_skb_any(skb); 1895 dev_kfree_skb_any(skb);
1891 else 1896 else
1892 complete(&sc->paprd_complete); 1897 complete(&sc->paprd_complete);
@@ -2113,9 +2118,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2113 if (needreset) { 2118 if (needreset) {
2114 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, 2119 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2115 "tx hung, resetting the chip\n"); 2120 "tx hung, resetting the chip\n");
2116 ath9k_ps_wakeup(sc);
2117 ath_reset(sc, true); 2121 ath_reset(sc, true);
2118 ath9k_ps_restore(sc);
2119 } 2122 }
2120 2123
2121 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2124 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 939a0e96ed1f..84866a4b8350 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
564 cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); 564 cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
565 565
566 /* 2. Maybe the AP wants to send multicast/broadcast data? */ 566 /* 2. Maybe the AP wants to send multicast/broadcast data? */
567 cam = !!(tim_ie->bitmap_ctrl & 0x01); 567 cam |= !!(tim_ie->bitmap_ctrl & 0x01);
568 568
569 if (!cam) { 569 if (!cam) {
570 /* back to low-power land. */ 570 /* back to low-power land. */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index bd8a4134edeb..2176edede39b 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -518,22 +518,21 @@ static int prism2_config(struct pcmcia_device *link)
518 hw_priv->link = link; 518 hw_priv->link = link;
519 519
520 /* 520 /*
521 * Make sure the IRQ handler cannot proceed until at least 521 * We enable IRQ here, but IRQ handler will not proceed
522 * dev->base_addr is initialized. 522 * until dev->base_addr is set below. This protect us from
523 * receive interrupts when driver is not initialized.
523 */ 524 */
524 spin_lock_irqsave(&local->irq_init_lock, flags);
525
526 ret = pcmcia_request_irq(link, prism2_interrupt); 525 ret = pcmcia_request_irq(link, prism2_interrupt);
527 if (ret) 526 if (ret)
528 goto failed_unlock; 527 goto failed;
529 528
530 ret = pcmcia_enable_device(link); 529 ret = pcmcia_enable_device(link);
531 if (ret) 530 if (ret)
532 goto failed_unlock; 531 goto failed;
533 532
533 spin_lock_irqsave(&local->irq_init_lock, flags);
534 dev->irq = link->irq; 534 dev->irq = link->irq;
535 dev->base_addr = link->resource[0]->start; 535 dev->base_addr = link->resource[0]->start;
536
537 spin_unlock_irqrestore(&local->irq_init_lock, flags); 536 spin_unlock_irqrestore(&local->irq_init_lock, flags);
538 537
539 local->shutdown = 0; 538 local->shutdown = 0;
@@ -546,8 +545,6 @@ static int prism2_config(struct pcmcia_device *link)
546 545
547 return ret; 546 return ret;
548 547
549 failed_unlock:
550 spin_unlock_irqrestore(&local->irq_init_lock, flags);
551 failed: 548 failed:
552 kfree(hw_priv); 549 kfree(hw_priv);
553 prism2_release((u_long)link); 550 prism2_release((u_long)link);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d6ed5f6f46f..ae438ed80c2f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1973,6 +1973,13 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1973 1973
1974 inta = ipw_read32(priv, IPW_INTA_RW); 1974 inta = ipw_read32(priv, IPW_INTA_RW);
1975 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 1975 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1976
1977 if (inta == 0xFFFFFFFF) {
1978 /* Hardware disappeared */
1979 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1980 /* Only handle the cached INTA values */
1981 inta = 0;
1982 }
1976 inta &= (IPW_INTA_MASK_ALL & inta_mask); 1983 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1977 1984
1978 /* Add any cached INTA values that need to be handled */ 1985 /* Add any cached INTA values that need to be handled */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3f1e5f1bf847..91a9f5253469 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2624,6 +2624,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2624 .fw_name_pre = IWL4965_FW_PRE, 2624 .fw_name_pre = IWL4965_FW_PRE,
2625 .ucode_api_max = IWL4965_UCODE_API_MAX, 2625 .ucode_api_max = IWL4965_UCODE_API_MAX,
2626 .ucode_api_min = IWL4965_UCODE_API_MIN, 2626 .ucode_api_min = IWL4965_UCODE_API_MIN,
2627 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2627 .valid_tx_ant = ANT_AB, 2628 .valid_tx_ant = ANT_AB,
2628 .valid_rx_ant = ANT_ABC, 2629 .valid_rx_ant = ANT_ABC,
2629 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2630 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index af505bcd7ae0..ef36aff1bb43 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
681 .fw_name_pre = IWL6050_FW_PRE, \ 681 .fw_name_pre = IWL6050_FW_PRE, \
682 .ucode_api_max = IWL6050_UCODE_API_MAX, \ 682 .ucode_api_max = IWL6050_UCODE_API_MAX, \
683 .ucode_api_min = IWL6050_UCODE_API_MIN, \ 683 .ucode_api_min = IWL6050_UCODE_API_MIN, \
684 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
685 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
684 .ops = &iwl6050_ops, \ 686 .ops = &iwl6050_ops, \
685 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 687 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
686 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 688 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index 97906dd442e6..27b5a3eec9dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -152,11 +152,14 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
152 152
153 eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); 153 eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
154 154
155 priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> 155 if (!priv->cfg->sku) {
156 /* not using sku overwrite */
157 priv->cfg->sku =
158 ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
156 EEPROM_SKU_CAP_BAND_POS); 159 EEPROM_SKU_CAP_BAND_POS);
157 if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) 160 if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
158 priv->cfg->sku |= IWL_SKU_N; 161 priv->cfg->sku |= IWL_SKU_N;
159 162 }
160 if (!priv->cfg->sku) { 163 if (!priv->cfg->sku) {
161 IWL_ERR(priv, "Invalid device sku\n"); 164 IWL_ERR(priv, "Invalid device sku\n");
162 return -EINVAL; 165 return -EINVAL;
@@ -168,7 +171,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
168 /* not using .cfg overwrite */ 171 /* not using .cfg overwrite */
169 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 172 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
170 priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); 173 priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
171 priv->cfg->valid_rx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); 174 priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
172 if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) { 175 if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
173 IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n", 176 IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
174 priv->cfg->valid_tx_ant, 177 priv->cfg->valid_tx_ant,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 36335b1b54d4..c1cfd9952e52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1157,6 +1157,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1157 /* only Re-enable if disabled by irq */ 1157 /* only Re-enable if disabled by irq */
1158 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1158 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1159 iwl_enable_interrupts(priv); 1159 iwl_enable_interrupts(priv);
1160 /* Re-enable RF_KILL if it occurred */
1161 else if (handled & CSR_INT_BIT_RF_KILL)
1162 iwl_enable_rfkill_int(priv);
1160 1163
1161#ifdef CONFIG_IWLWIFI_DEBUG 1164#ifdef CONFIG_IWLWIFI_DEBUG
1162 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1165 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1371,6 +1374,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1371 /* only Re-enable if disabled by irq */ 1374 /* only Re-enable if disabled by irq */
1372 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1375 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1373 iwl_enable_interrupts(priv); 1376 iwl_enable_interrupts(priv);
1377 /* Re-enable RF_KILL if it occurred */
1378 else if (handled & CSR_INT_BIT_RF_KILL)
1379 iwl_enable_rfkill_int(priv);
1374} 1380}
1375 1381
1376/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ 1382/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 13a69ebf2a94..5091d77e02ce 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -126,6 +126,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
126 ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); 126 ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
127 if (!ndev) { 127 if (!ndev) {
128 dev_err(dev, "no memory for network device instance\n"); 128 dev_err(dev, "no memory for network device instance\n");
129 ret = -ENOMEM;
129 goto out_priv; 130 goto out_priv;
130 } 131 }
131 132
@@ -138,6 +139,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
138 GFP_KERNEL); 139 GFP_KERNEL);
139 if (!iwm->umac_profile) { 140 if (!iwm->umac_profile) {
140 dev_err(dev, "Couldn't alloc memory for profile\n"); 141 dev_err(dev, "Couldn't alloc memory for profile\n");
142 ret = -ENOMEM;
141 goto out_profile; 143 goto out_profile;
142 } 144 }
143 145
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 76b2318a7dc7..f618b9623e5a 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -618,7 +618,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
618 else 618 else
619 *burst_possible = false; 619 *burst_possible = false;
620 620
621 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) 621 if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
622 *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; 622 *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
623 623
624 if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE) 624 if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index f0e1eb72befc..be0ff78c1b16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -58,6 +58,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
58 58
59 if (!fw || !fw->size || !fw->data) { 59 if (!fw || !fw->size || !fw->data) {
60 ERROR(rt2x00dev, "Failed to read Firmware.\n"); 60 ERROR(rt2x00dev, "Failed to read Firmware.\n");
61 release_firmware(fw);
61 return -ENOENT; 62 return -ENOENT;
62 } 63 }
63 64
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 73631c6fbb30..ace0b668c04e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -363,12 +363,12 @@ int rt2x00pci_resume(struct pci_dev *pci_dev)
363 struct rt2x00_dev *rt2x00dev = hw->priv; 363 struct rt2x00_dev *rt2x00dev = hw->priv;
364 364
365 if (pci_set_power_state(pci_dev, PCI_D0) || 365 if (pci_set_power_state(pci_dev, PCI_D0) ||
366 pci_enable_device(pci_dev) || 366 pci_enable_device(pci_dev)) {
367 pci_restore_state(pci_dev)) {
368 ERROR(rt2x00dev, "Failed to resume device.\n"); 367 ERROR(rt2x00dev, "Failed to resume device.\n");
369 return -EIO; 368 return -EIO;
370 } 369 }
371 370
371 pci_restore_state(pci_dev);
372 return rt2x00lib_resume(rt2x00dev); 372 return rt2x00lib_resume(rt2x00dev);
373} 373}
374EXPORT_SYMBOL_GPL(rt2x00pci_resume); 374EXPORT_SYMBOL_GPL(rt2x00pci_resume);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 0b4e8590cbb7..029be3c6c030 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2446,6 +2446,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2446 { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, 2446 { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
2447 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, 2447 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
2448 { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, 2448 { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
2449 { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
2449 /* Qcom */ 2450 /* Qcom */
2450 { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, 2451 { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
2451 { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, 2452 { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index b8433f3a9bc2..62876cd5c41a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
726} 726}
727 727
728static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, 728static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
729 u8 efuse_data, u8 offset, int *bcontinual, 729 u8 efuse_data, u8 offset, int *bcontinual,
730 u8 *write_state, struct pgpkt_struct target_pkt, 730 u8 *write_state, struct pgpkt_struct *target_pkt,
731 int *repeat_times, int *bresult, u8 word_en) 731 int *repeat_times, int *bresult, u8 word_en)
732{ 732{
733 struct rtl_priv *rtlpriv = rtl_priv(hw); 733 struct rtl_priv *rtlpriv = rtl_priv(hw);
734 struct pgpkt_struct tmp_pkt; 734 struct pgpkt_struct tmp_pkt;
@@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
744 tmp_pkt.word_en = tmp_header & 0x0F; 744 tmp_pkt.word_en = tmp_header & 0x0F;
745 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); 745 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
746 746
747 if (tmp_pkt.offset != target_pkt.offset) { 747 if (tmp_pkt.offset != target_pkt->offset) {
748 efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; 748 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
749 *write_state = PG_STATE_HEADER; 749 *write_state = PG_STATE_HEADER;
750 } else { 750 } else {
751 for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) { 751 for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
@@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
756 } 756 }
757 757
758 if (bdataempty == false) { 758 if (bdataempty == false) {
759 efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; 759 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
760 *write_state = PG_STATE_HEADER; 760 *write_state = PG_STATE_HEADER;
761 } else { 761 } else {
762 match_word_en = 0x0F; 762 match_word_en = 0x0F;
763 if (!((target_pkt.word_en & BIT(0)) | 763 if (!((target_pkt->word_en & BIT(0)) |
764 (tmp_pkt.word_en & BIT(0)))) 764 (tmp_pkt.word_en & BIT(0))))
765 match_word_en &= (~BIT(0)); 765 match_word_en &= (~BIT(0));
766 766
767 if (!((target_pkt.word_en & BIT(1)) | 767 if (!((target_pkt->word_en & BIT(1)) |
768 (tmp_pkt.word_en & BIT(1)))) 768 (tmp_pkt.word_en & BIT(1))))
769 match_word_en &= (~BIT(1)); 769 match_word_en &= (~BIT(1));
770 770
771 if (!((target_pkt.word_en & BIT(2)) | 771 if (!((target_pkt->word_en & BIT(2)) |
772 (tmp_pkt.word_en & BIT(2)))) 772 (tmp_pkt.word_en & BIT(2))))
773 match_word_en &= (~BIT(2)); 773 match_word_en &= (~BIT(2));
774 774
775 if (!((target_pkt.word_en & BIT(3)) | 775 if (!((target_pkt->word_en & BIT(3)) |
776 (tmp_pkt.word_en & BIT(3)))) 776 (tmp_pkt.word_en & BIT(3))))
777 match_word_en &= (~BIT(3)); 777 match_word_en &= (~BIT(3));
778 778
@@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
780 badworden = efuse_word_enable_data_write( 780 badworden = efuse_word_enable_data_write(
781 hw, *efuse_addr + 1, 781 hw, *efuse_addr + 1,
782 tmp_pkt.word_en, 782 tmp_pkt.word_en,
783 target_pkt.data); 783 target_pkt->data);
784 784
785 if (0x0F != (badworden & 0x0F)) { 785 if (0x0F != (badworden & 0x0F)) {
786 u8 reorg_offset = offset; 786 u8 reorg_offset = offset;
@@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
791 } 791 }
792 792
793 tmp_word_en = 0x0F; 793 tmp_word_en = 0x0F;
794 if ((target_pkt.word_en & BIT(0)) ^ 794 if ((target_pkt->word_en & BIT(0)) ^
795 (match_word_en & BIT(0))) 795 (match_word_en & BIT(0)))
796 tmp_word_en &= (~BIT(0)); 796 tmp_word_en &= (~BIT(0));
797 797
798 if ((target_pkt.word_en & BIT(1)) ^ 798 if ((target_pkt->word_en & BIT(1)) ^
799 (match_word_en & BIT(1))) 799 (match_word_en & BIT(1)))
800 tmp_word_en &= (~BIT(1)); 800 tmp_word_en &= (~BIT(1));
801 801
802 if ((target_pkt.word_en & BIT(2)) ^ 802 if ((target_pkt->word_en & BIT(2)) ^
803 (match_word_en & BIT(2))) 803 (match_word_en & BIT(2)))
804 tmp_word_en &= (~BIT(2)); 804 tmp_word_en &= (~BIT(2));
805 805
806 if ((target_pkt.word_en & BIT(3)) ^ 806 if ((target_pkt->word_en & BIT(3)) ^
807 (match_word_en & BIT(3))) 807 (match_word_en & BIT(3)))
808 tmp_word_en &= (~BIT(3)); 808 tmp_word_en &= (~BIT(3));
809 809
810 if ((tmp_word_en & 0x0F) != 0x0F) { 810 if ((tmp_word_en & 0x0F) != 0x0F) {
811 *efuse_addr = efuse_get_current_size(hw); 811 *efuse_addr = efuse_get_current_size(hw);
812 target_pkt.offset = offset; 812 target_pkt->offset = offset;
813 target_pkt.word_en = tmp_word_en; 813 target_pkt->word_en = tmp_word_en;
814 } else 814 } else
815 *bcontinual = false; 815 *bcontinual = false;
816 *write_state = PG_STATE_HEADER; 816 *write_state = PG_STATE_HEADER;
@@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
821 } 821 }
822 } else { 822 } else {
823 *efuse_addr += (2 * tmp_word_cnts) + 1; 823 *efuse_addr += (2 * tmp_word_cnts) + 1;
824 target_pkt.offset = offset; 824 target_pkt->offset = offset;
825 target_pkt.word_en = word_en; 825 target_pkt->word_en = word_en;
826 *write_state = PG_STATE_HEADER; 826 *write_state = PG_STATE_HEADER;
827 } 827 }
828 } 828 }
@@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
938 efuse_write_data_case1(hw, &efuse_addr, 938 efuse_write_data_case1(hw, &efuse_addr,
939 efuse_data, offset, 939 efuse_data, offset,
940 &bcontinual, 940 &bcontinual,
941 &write_state, target_pkt, 941 &write_state, &target_pkt,
942 &repeat_times, &bresult, 942 &repeat_times, &bresult,
943 word_en); 943 word_en);
944 else 944 else
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0fa36aa6701a..1758d4463247 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -619,6 +619,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
619 struct sk_buff *uskb = NULL; 619 struct sk_buff *uskb = NULL;
620 u8 *pdata; 620 u8 *pdata;
621 uskb = dev_alloc_skb(skb->len + 128); 621 uskb = dev_alloc_skb(skb->len + 128);
622 if (!uskb) {
623 RT_TRACE(rtlpriv,
624 (COMP_INTR | COMP_RECV),
625 DBG_EMERG,
626 ("can't alloc rx skb\n"));
627 goto done;
628 }
622 memcpy(IEEE80211_SKB_RXCB(uskb), 629 memcpy(IEEE80211_SKB_RXCB(uskb),
623 &rx_status, 630 &rx_status,
624 sizeof(rx_status)); 631 sizeof(rx_status));
@@ -641,7 +648,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
641 new_skb = dev_alloc_skb(rtlpci->rxbuffersize); 648 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
642 if (unlikely(!new_skb)) { 649 if (unlikely(!new_skb)) {
643 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), 650 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
644 DBG_DMESG, 651 DBG_EMERG,
645 ("can't alloc skb for rx\n")); 652 ("can't alloc skb for rx\n"));
646 goto done; 653 goto done;
647 } 654 }
@@ -1066,9 +1073,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1066 struct sk_buff *skb = 1073 struct sk_buff *skb =
1067 dev_alloc_skb(rtlpci->rxbuffersize); 1074 dev_alloc_skb(rtlpci->rxbuffersize);
1068 u32 bufferaddress; 1075 u32 bufferaddress;
1069 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1070 if (!skb) 1076 if (!skb)
1071 return 0; 1077 return 0;
1078 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1072 1079
1073 /*skb->dev = dev; */ 1080 /*skb->dev = dev; */
1074 1081
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 012e1a4016fe..40372bac9482 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1039 1039
1040 if (changed & BSS_CHANGED_BEACON) { 1040 if (changed & BSS_CHANGED_BEACON) {
1041 beacon = ieee80211_beacon_get(hw, vif); 1041 beacon = ieee80211_beacon_get(hw, vif);
1042 if (!beacon)
1043 goto out_sleep;
1044
1042 ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, 1045 ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
1043 beacon->len); 1046 beacon->len);
1044 1047
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 46714910f98c..7145ea543783 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,9 +110,8 @@ static void wl1271_spi_reset(struct wl1271 *wl)
110 spi_message_add_tail(&t, &m); 110 spi_message_add_tail(&t, &m);
111 111
112 spi_sync(wl_to_spi(wl), &m); 112 spi_sync(wl_to_spi(wl), &m);
113 kfree(cmd);
114
115 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); 113 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
114 kfree(cmd);
116} 115}
117 116
118static void wl1271_spi_init(struct wl1271 *wl) 117static void wl1271_spi_init(struct wl1271 *wl)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de5749824..da1f12120346 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
120 unsigned long rx_pfn_array[NET_RX_RING_SIZE]; 120 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
121 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; 121 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
122 struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 122 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
123
124 /* Statistics */
125 int rx_gso_checksum_fixup;
123}; 126};
124 127
125struct netfront_rx_info { 128struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
770 return cons; 773 return cons;
771} 774}
772 775
773static int skb_checksum_setup(struct sk_buff *skb) 776static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
774{ 777{
775 struct iphdr *iph; 778 struct iphdr *iph;
776 unsigned char *th; 779 unsigned char *th;
777 int err = -EPROTO; 780 int err = -EPROTO;
781 int recalculate_partial_csum = 0;
782
783 /*
784 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
785 * peers can fail to set NETRXF_csum_blank when sending a GSO
786 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
787 * recalculate the partial checksum.
788 */
789 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
790 struct netfront_info *np = netdev_priv(dev);
791 np->rx_gso_checksum_fixup++;
792 skb->ip_summed = CHECKSUM_PARTIAL;
793 recalculate_partial_csum = 1;
794 }
795
796 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
797 if (skb->ip_summed != CHECKSUM_PARTIAL)
798 return 0;
778 799
779 if (skb->protocol != htons(ETH_P_IP)) 800 if (skb->protocol != htons(ETH_P_IP))
780 goto out; 801 goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
788 switch (iph->protocol) { 809 switch (iph->protocol) {
789 case IPPROTO_TCP: 810 case IPPROTO_TCP:
790 skb->csum_offset = offsetof(struct tcphdr, check); 811 skb->csum_offset = offsetof(struct tcphdr, check);
812
813 if (recalculate_partial_csum) {
814 struct tcphdr *tcph = (struct tcphdr *)th;
815 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
816 skb->len - iph->ihl*4,
817 IPPROTO_TCP, 0);
818 }
791 break; 819 break;
792 case IPPROTO_UDP: 820 case IPPROTO_UDP:
793 skb->csum_offset = offsetof(struct udphdr, check); 821 skb->csum_offset = offsetof(struct udphdr, check);
822
823 if (recalculate_partial_csum) {
824 struct udphdr *udph = (struct udphdr *)th;
825 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
826 skb->len - iph->ihl*4,
827 IPPROTO_UDP, 0);
828 }
794 break; 829 break;
795 default: 830 default:
796 if (net_ratelimit()) 831 if (net_ratelimit())
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
829 /* Ethernet work: Delayed to here as it peeks the header. */ 864 /* Ethernet work: Delayed to here as it peeks the header. */
830 skb->protocol = eth_type_trans(skb, dev); 865 skb->protocol = eth_type_trans(skb, dev);
831 866
832 if (skb->ip_summed == CHECKSUM_PARTIAL) { 867 if (checksum_setup(dev, skb)) {
833 if (skb_checksum_setup(skb)) { 868 kfree_skb(skb);
834 kfree_skb(skb); 869 packets_dropped++;
835 packets_dropped++; 870 dev->stats.rx_errors++;
836 dev->stats.rx_errors++; 871 continue;
837 continue;
838 }
839 } 872 }
840 873
841 dev->stats.rx_packets++; 874 dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
1632 } 1665 }
1633} 1666}
1634 1667
1668static const struct xennet_stat {
1669 char name[ETH_GSTRING_LEN];
1670 u16 offset;
1671} xennet_stats[] = {
1672 {
1673 "rx_gso_checksum_fixup",
1674 offsetof(struct netfront_info, rx_gso_checksum_fixup)
1675 },
1676};
1677
1678static int xennet_get_sset_count(struct net_device *dev, int string_set)
1679{
1680 switch (string_set) {
1681 case ETH_SS_STATS:
1682 return ARRAY_SIZE(xennet_stats);
1683 default:
1684 return -EINVAL;
1685 }
1686}
1687
1688static void xennet_get_ethtool_stats(struct net_device *dev,
1689 struct ethtool_stats *stats, u64 * data)
1690{
1691 void *np = netdev_priv(dev);
1692 int i;
1693
1694 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1695 data[i] = *(int *)(np + xennet_stats[i].offset);
1696}
1697
1698static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1699{
1700 int i;
1701
1702 switch (stringset) {
1703 case ETH_SS_STATS:
1704 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1705 memcpy(data + i * ETH_GSTRING_LEN,
1706 xennet_stats[i].name, ETH_GSTRING_LEN);
1707 break;
1708 }
1709}
1710
1635static const struct ethtool_ops xennet_ethtool_ops = 1711static const struct ethtool_ops xennet_ethtool_ops =
1636{ 1712{
1637 .set_tx_csum = ethtool_op_set_tx_csum, 1713 .set_tx_csum = ethtool_op_set_tx_csum,
1638 .set_sg = xennet_set_sg, 1714 .set_sg = xennet_set_sg,
1639 .set_tso = xennet_set_tso, 1715 .set_tso = xennet_set_tso,
1640 .get_link = ethtool_op_get_link, 1716 .get_link = ethtool_op_get_link,
1717
1718 .get_sset_count = xennet_get_sset_count,
1719 .get_ethtool_stats = xennet_get_ethtool_stats,
1720 .get_strings = xennet_get_strings,
1641}; 1721};
1642 1722
1643#ifdef CONFIG_SYSFS 1723#ifdef CONFIG_SYSFS
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index 401c44b6eadb..bae647264dd6 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -69,7 +69,7 @@ struct pn544_info {
69 struct mutex read_mutex; /* Serialize read_irq access */ 69 struct mutex read_mutex; /* Serialize read_irq access */
70 struct mutex mutex; /* Serialize info struct access */ 70 struct mutex mutex; /* Serialize info struct access */
71 u8 *buf; 71 u8 *buf;
72 unsigned int buflen; 72 size_t buflen;
73}; 73};
74 74
75static const char reg_vdd_io[] = "Vdd_IO"; 75static const char reg_vdd_io[] = "Vdd_IO";
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c787c3d95c60..af824e7e0367 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -692,12 +692,6 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
692 return 1; 692 return 1;
693} 693}
694 694
695static void *__init early_device_tree_alloc(u64 size, u64 align)
696{
697 unsigned long mem = early_init_dt_alloc_memory_arch(size, align);
698 return __va(mem);
699}
700
701/** 695/**
702 * unflatten_device_tree - create tree of device_nodes from flat blob 696 * unflatten_device_tree - create tree of device_nodes from flat blob
703 * 697 *
@@ -709,7 +703,7 @@ static void *__init early_device_tree_alloc(u64 size, u64 align)
709void __init unflatten_device_tree(void) 703void __init unflatten_device_tree(void)
710{ 704{
711 __unflatten_device_tree(initial_boot_params, &allnodes, 705 __unflatten_device_tree(initial_boot_params, &allnodes,
712 early_device_tree_alloc); 706 early_init_dt_alloc_memory_arch);
713 707
714 /* Get pointer to OF "/chosen" node for use everywhere */ 708 /* Get pointer to OF "/chosen" node for use everywhere */
715 of_chosen = of_find_node_by_path("/chosen"); 709 of_chosen = of_find_node_by_path("/chosen");
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index a2d9d1e59260..a848e02e6be3 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -678,7 +678,7 @@ void parport_unregister_device(struct pardevice *dev)
678 678
679 /* Make sure we haven't left any pointers around in the wait 679 /* Make sure we haven't left any pointers around in the wait
680 * list. */ 680 * list. */
681 spin_lock (&port->waitlist_lock); 681 spin_lock_irq(&port->waitlist_lock);
682 if (dev->waitprev || dev->waitnext || port->waithead == dev) { 682 if (dev->waitprev || dev->waitnext || port->waithead == dev) {
683 if (dev->waitprev) 683 if (dev->waitprev)
684 dev->waitprev->waitnext = dev->waitnext; 684 dev->waitprev->waitnext = dev->waitnext;
@@ -689,7 +689,7 @@ void parport_unregister_device(struct pardevice *dev)
689 else 689 else
690 port->waittail = dev->waitprev; 690 port->waittail = dev->waitprev;
691 } 691 }
692 spin_unlock (&port->waitlist_lock); 692 spin_unlock_irq(&port->waitlist_lock);
693 693
694 kfree(dev->state); 694 kfree(dev->state);
695 kfree(dev); 695 kfree(dev);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7c24dcef2989..44b0aeee83e5 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -168,8 +168,9 @@ static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
168 u32 mask_bits = desc->masked; 168 u32 mask_bits = desc->masked;
169 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 169 unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
170 PCI_MSIX_ENTRY_VECTOR_CTRL; 170 PCI_MSIX_ENTRY_VECTOR_CTRL;
171 mask_bits &= ~1; 171 mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
172 mask_bits |= flag; 172 if (flag)
173 mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
173 writel(mask_bits, desc->mask_base + offset); 174 writel(mask_bits, desc->mask_base + offset);
174 175
175 return mask_bits; 176 return mask_bits;
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index feff3bee6fe5..65c42f80f23e 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,12 +6,6 @@
6#ifndef MSI_H 6#ifndef MSI_H
7#define MSI_H 7#define MSI_H
8 8
9#define PCI_MSIX_ENTRY_SIZE 16
10#define PCI_MSIX_ENTRY_LOWER_ADDR 0
11#define PCI_MSIX_ENTRY_UPPER_ADDR 4
12#define PCI_MSIX_ENTRY_DATA 8
13#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
14
15#define msi_control_reg(base) (base + PCI_MSI_FLAGS) 9#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
16#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) 10#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
17#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) 11#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 24e19c594e57..6fe0772e0e7d 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -46,9 +46,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
46 struct pci_dev *pci_dev = context; 46 struct pci_dev *pci_dev = context;
47 47
48 if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { 48 if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
49 pci_wakeup_event(pci_dev);
49 pci_check_pme_status(pci_dev); 50 pci_check_pme_status(pci_dev);
50 pm_runtime_resume(&pci_dev->dev); 51 pm_runtime_resume(&pci_dev->dev);
51 pci_wakeup_event(pci_dev);
52 if (pci_dev->subordinate) 52 if (pci_dev->subordinate)
53 pci_pme_wakeup_bus(pci_dev->subordinate); 53 pci_pme_wakeup_bus(pci_dev->subordinate);
54 } 54 }
@@ -399,6 +399,7 @@ static int __init acpi_pci_init(void)
399 399
400 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { 400 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
401 printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); 401 printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
402 pcie_clear_aspm();
402 pcie_no_aspm(); 403 pcie_no_aspm();
403 } 404 }
404 405
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8a6f797de8e5..88246dd46452 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -338,7 +338,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
338} 338}
339 339
340/** 340/**
341 * __pci_device_probe() 341 * __pci_device_probe - check if a driver wants to claim a specific PCI device
342 * @drv: driver to call to check if it wants the PCI device 342 * @drv: driver to call to check if it wants the PCI device
343 * @pci_dev: PCI device being probed 343 * @pci_dev: PCI device being probed
344 * 344 *
@@ -449,7 +449,8 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
449 return error; 449 return error;
450 } 450 }
451 451
452 return pci_restore_state(pci_dev); 452 pci_restore_state(pci_dev);
453 return 0;
453} 454}
454 455
455static void pci_pm_default_resume_early(struct pci_dev *pci_dev) 456static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index f7b68ca6cc98..775e933c2225 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -47,6 +47,10 @@ static int __init pci_stub_init(void)
47 if (rc) 47 if (rc)
48 return rc; 48 return rc;
49 49
50 /* no ids passed actually */
51 if (ids[0] == '\0')
52 return 0;
53
50 /* add ids specified in the module parameter */ 54 /* add ids specified in the module parameter */
51 p = ids; 55 p = ids;
52 while ((id = strsep(&p, ","))) { 56 while ((id = strsep(&p, ","))) {
@@ -54,6 +58,9 @@ static int __init pci_stub_init(void)
54 subdevice = PCI_ANY_ID, class=0, class_mask=0; 58 subdevice = PCI_ANY_ID, class=0, class_mask=0;
55 int fields; 59 int fields;
56 60
61 if (!strlen(id))
62 continue;
63
57 fields = sscanf(id, "%x:%x:%x:%x:%x:%x", 64 fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
58 &vendor, &device, &subvendor, &subdevice, 65 &vendor, &device, &subvendor, &subdevice,
59 &class, &class_mask); 66 &class, &class_mask);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 63d5042f2079..8ecaac983923 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1149,7 +1149,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
1149 sysfs_bin_attr_init(attr); 1149 sysfs_bin_attr_init(attr);
1150 attr->size = rom_size; 1150 attr->size = rom_size;
1151 attr->attr.name = "rom"; 1151 attr->attr.name = "rom";
1152 attr->attr.mode = S_IRUSR; 1152 attr->attr.mode = S_IRUSR | S_IWUSR;
1153 attr->read = pci_read_rom; 1153 attr->read = pci_read_rom;
1154 attr->write = pci_write_rom; 1154 attr->write = pci_write_rom;
1155 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); 1155 retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 710c8a29be0d..b714d787bddd 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -937,14 +937,13 @@ pci_save_state(struct pci_dev *dev)
937 * pci_restore_state - Restore the saved state of a PCI device 937 * pci_restore_state - Restore the saved state of a PCI device
938 * @dev: - PCI device that we're dealing with 938 * @dev: - PCI device that we're dealing with
939 */ 939 */
940int 940void pci_restore_state(struct pci_dev *dev)
941pci_restore_state(struct pci_dev *dev)
942{ 941{
943 int i; 942 int i;
944 u32 val; 943 u32 val;
945 944
946 if (!dev->state_saved) 945 if (!dev->state_saved)
947 return 0; 946 return;
948 947
949 /* PCI Express register must be restored first */ 948 /* PCI Express register must be restored first */
950 pci_restore_pcie_state(dev); 949 pci_restore_pcie_state(dev);
@@ -968,8 +967,6 @@ pci_restore_state(struct pci_dev *dev)
968 pci_restore_iov_state(dev); 967 pci_restore_iov_state(dev);
969 968
970 dev->state_saved = false; 969 dev->state_saved = false;
971
972 return 0;
973} 970}
974 971
975static int do_pci_enable_device(struct pci_dev *dev, int bars) 972static int do_pci_enable_device(struct pci_dev *dev, int bars)
@@ -1300,22 +1297,6 @@ bool pci_check_pme_status(struct pci_dev *dev)
1300 return ret; 1297 return ret;
1301} 1298}
1302 1299
1303/*
1304 * Time to wait before the system can be put into a sleep state after reporting
1305 * a wakeup event signaled by a PCI device.
1306 */
1307#define PCI_WAKEUP_COOLDOWN 100
1308
1309/**
1310 * pci_wakeup_event - Report a wakeup event related to a given PCI device.
1311 * @dev: Device to report the wakeup event for.
1312 */
1313void pci_wakeup_event(struct pci_dev *dev)
1314{
1315 if (device_may_wakeup(&dev->dev))
1316 pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN);
1317}
1318
1319/** 1300/**
1320 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. 1301 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1321 * @dev: Device to handle. 1302 * @dev: Device to handle.
@@ -1327,8 +1308,8 @@ void pci_wakeup_event(struct pci_dev *dev)
1327static int pci_pme_wakeup(struct pci_dev *dev, void *ign) 1308static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
1328{ 1309{
1329 if (pci_check_pme_status(dev)) { 1310 if (pci_check_pme_status(dev)) {
1330 pm_request_resume(&dev->dev);
1331 pci_wakeup_event(dev); 1311 pci_wakeup_event(dev);
1312 pm_request_resume(&dev->dev);
1332 } 1313 }
1333 return 0; 1314 return 0;
1334} 1315}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7d33f6673868..f69d6e0fda75 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -74,6 +74,12 @@ extern void pci_pm_init(struct pci_dev *dev);
74extern void platform_pci_wakeup_init(struct pci_dev *dev); 74extern void platform_pci_wakeup_init(struct pci_dev *dev);
75extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); 75extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
76 76
77static inline void pci_wakeup_event(struct pci_dev *dev)
78{
79 /* Wait 100 ms before the system can be put into a sleep state. */
80 pm_wakeup_event(&dev->dev, 100);
81}
82
77static inline bool pci_is_bridge(struct pci_dev *pci_dev) 83static inline bool pci_is_bridge(struct pci_dev *pci_dev)
78{ 84{
79 return !!(pci_dev->subordinate); 85 return !!(pci_dev->subordinate);
@@ -140,14 +146,6 @@ static inline void pci_no_msi(void) { }
140static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } 146static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
141#endif 147#endif
142 148
143#ifdef CONFIG_PCIEAER
144void pci_no_aer(void);
145bool pci_aer_available(void);
146#else
147static inline void pci_no_aer(void) { }
148static inline bool pci_aer_available(void) { return false; }
149#endif
150
151static inline int pci_no_d1d2(struct pci_dev *dev) 149static inline int pci_no_d1d2(struct pci_dev *dev)
152{ 150{
153 unsigned int parent_dstates = 0; 151 unsigned int parent_dstates = 0;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dda70981b7a6..dc29348264c6 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -31,7 +31,7 @@ source "drivers/pci/pcie/aer/Kconfig"
31# PCI Express ASPM 31# PCI Express ASPM
32# 32#
33config PCIEASPM 33config PCIEASPM
34 bool "PCI Express ASPM control" if EMBEDDED 34 bool "PCI Express ASPM control" if EXPERT
35 depends on PCI && PCIEPORTBUS 35 depends on PCI && PCIEPORTBUS
36 default y 36 default y
37 help 37 help
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 2b2b6508efde..58ad7917553c 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/pci-acpi.h>
20#include <linux/sched.h> 21#include <linux/sched.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/errno.h> 23#include <linux/errno.h>
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 9656e3060412..80c11d131499 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -132,7 +132,6 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
132 132
133#ifdef CONFIG_ACPI_APEI 133#ifdef CONFIG_ACPI_APEI
134extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); 134extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
135extern bool aer_acpi_firmware_first(void);
136#else 135#else
137static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) 136static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
138{ 137{
@@ -140,8 +139,6 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
140 return pci_dev->__aer_firmware_first; 139 return pci_dev->__aer_firmware_first;
141 return 0; 140 return 0;
142} 141}
143
144static inline bool aer_acpi_firmware_first(void) { return false; }
145#endif 142#endif
146 143
147static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev, 144static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 71222814c1ec..3188cd96b338 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -68,7 +68,7 @@ struct pcie_link_state {
68 struct aspm_latency acceptable[8]; 68 struct aspm_latency acceptable[8];
69}; 69};
70 70
71static int aspm_disabled, aspm_force; 71static int aspm_disabled, aspm_force, aspm_clear_state;
72static DEFINE_MUTEX(aspm_lock); 72static DEFINE_MUTEX(aspm_lock);
73static LIST_HEAD(link_list); 73static LIST_HEAD(link_list);
74 74
@@ -139,7 +139,7 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
139{ 139{
140 /* Don't enable Clock PM if the link is not Clock PM capable */ 140 /* Don't enable Clock PM if the link is not Clock PM capable */
141 if (!link->clkpm_capable && enable) 141 if (!link->clkpm_capable && enable)
142 return; 142 enable = 0;
143 /* Need nothing if the specified equals to current state */ 143 /* Need nothing if the specified equals to current state */
144 if (link->clkpm_enabled == enable) 144 if (link->clkpm_enabled == enable)
145 return; 145 return;
@@ -498,6 +498,10 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
498 struct pci_dev *child; 498 struct pci_dev *child;
499 int pos; 499 int pos;
500 u32 reg32; 500 u32 reg32;
501
502 if (aspm_clear_state)
503 return -EINVAL;
504
501 /* 505 /*
502 * Some functions in a slot might not all be PCIe functions, 506 * Some functions in a slot might not all be PCIe functions,
503 * very strange. Disable ASPM for the whole slot 507 * very strange. Disable ASPM for the whole slot
@@ -563,12 +567,15 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
563 struct pcie_link_state *link; 567 struct pcie_link_state *link;
564 int blacklist = !!pcie_aspm_sanity_check(pdev); 568 int blacklist = !!pcie_aspm_sanity_check(pdev);
565 569
566 if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state) 570 if (!pci_is_pcie(pdev) || pdev->link_state)
567 return; 571 return;
568 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 572 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
569 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 573 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
570 return; 574 return;
571 575
576 if (aspm_disabled && !aspm_clear_state)
577 return;
578
572 /* VIA has a strange chipset, root port is under a bridge */ 579 /* VIA has a strange chipset, root port is under a bridge */
573 if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && 580 if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
574 pdev->bus->self) 581 pdev->bus->self)
@@ -641,7 +648,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
641 struct pci_dev *parent = pdev->bus->self; 648 struct pci_dev *parent = pdev->bus->self;
642 struct pcie_link_state *link, *root, *parent_link; 649 struct pcie_link_state *link, *root, *parent_link;
643 650
644 if (aspm_disabled || !pci_is_pcie(pdev) || 651 if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
645 !parent || !parent->link_state) 652 !parent || !parent->link_state)
646 return; 653 return;
647 if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 654 if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
@@ -899,6 +906,12 @@ static int __init pcie_aspm_disable(char *str)
899 906
900__setup("pcie_aspm=", pcie_aspm_disable); 907__setup("pcie_aspm=", pcie_aspm_disable);
901 908
909void pcie_clear_aspm(void)
910{
911 if (!aspm_force)
912 aspm_clear_state = 1;
913}
914
902void pcie_no_aspm(void) 915void pcie_no_aspm(void)
903{ 916{
904 if (!aspm_force) 917 if (!aspm_force)
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 2f3c90407227..0057344a3fcb 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -26,9 +26,6 @@
26#include "../pci.h" 26#include "../pci.h"
27#include "portdrv.h" 27#include "portdrv.h"
28 28
29#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
30#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
31
32/* 29/*
33 * If this switch is set, MSI will not be used for PCIe PME signaling. This 30 * If this switch is set, MSI will not be used for PCIe PME signaling. This
34 * causes the PCIe port driver to use INTx interrupts only, but it turns out 31 * causes the PCIe port driver to use INTx interrupts only, but it turns out
@@ -74,22 +71,6 @@ void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
74} 71}
75 72
76/** 73/**
77 * pcie_pme_clear_status - Clear root port PME interrupt status.
78 * @dev: PCIe root port or event collector.
79 */
80static void pcie_pme_clear_status(struct pci_dev *dev)
81{
82 int rtsta_pos;
83 u32 rtsta;
84
85 rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
86
87 pci_read_config_dword(dev, rtsta_pos, &rtsta);
88 rtsta |= PCI_EXP_RTSTA_PME;
89 pci_write_config_dword(dev, rtsta_pos, rtsta);
90}
91
92/**
93 * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#. 74 * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
94 * @bus: PCI bus to scan. 75 * @bus: PCI bus to scan.
95 * 76 *
@@ -103,8 +84,8 @@ static bool pcie_pme_walk_bus(struct pci_bus *bus)
103 list_for_each_entry(dev, &bus->devices, bus_list) { 84 list_for_each_entry(dev, &bus->devices, bus_list) {
104 /* Skip PCIe devices in case we started from a root port. */ 85 /* Skip PCIe devices in case we started from a root port. */
105 if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { 86 if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
106 pm_request_resume(&dev->dev);
107 pci_wakeup_event(dev); 87 pci_wakeup_event(dev);
88 pm_request_resume(&dev->dev);
108 ret = true; 89 ret = true;
109 } 90 }
110 91
@@ -206,8 +187,8 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
206 /* The device is there, but we have to check its PME status. */ 187 /* The device is there, but we have to check its PME status. */
207 found = pci_check_pme_status(dev); 188 found = pci_check_pme_status(dev);
208 if (found) { 189 if (found) {
209 pm_request_resume(&dev->dev);
210 pci_wakeup_event(dev); 190 pci_wakeup_event(dev);
191 pm_request_resume(&dev->dev);
211 } 192 }
212 pci_dev_put(dev); 193 pci_dev_put(dev);
213 } else if (devfn) { 194 } else if (devfn) {
@@ -253,7 +234,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
253 * Clear PME status of the port. If there are other 234 * Clear PME status of the port. If there are other
254 * pending PMEs, the status will be set again. 235 * pending PMEs, the status will be set again.
255 */ 236 */
256 pcie_pme_clear_status(port); 237 pcie_clear_root_pme_status(port);
257 238
258 spin_unlock_irq(&data->lock); 239 spin_unlock_irq(&data->lock);
259 pcie_pme_handle_request(port, rtsta & 0xffff); 240 pcie_pme_handle_request(port, rtsta & 0xffff);
@@ -378,7 +359,7 @@ static int pcie_pme_probe(struct pcie_device *srv)
378 359
379 port = srv->port; 360 port = srv->port;
380 pcie_pme_interrupt_enable(port, false); 361 pcie_pme_interrupt_enable(port, false);
381 pcie_pme_clear_status(port); 362 pcie_clear_root_pme_status(port);
382 363
383 ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv); 364 ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
384 if (ret) { 365 if (ret) {
@@ -402,7 +383,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
402 383
403 spin_lock_irq(&data->lock); 384 spin_lock_irq(&data->lock);
404 pcie_pme_interrupt_enable(port, false); 385 pcie_pme_interrupt_enable(port, false);
405 pcie_pme_clear_status(port); 386 pcie_clear_root_pme_status(port);
406 data->noirq = true; 387 data->noirq = true;
407 spin_unlock_irq(&data->lock); 388 spin_unlock_irq(&data->lock);
408 389
@@ -422,7 +403,7 @@ static int pcie_pme_resume(struct pcie_device *srv)
422 403
423 spin_lock_irq(&data->lock); 404 spin_lock_irq(&data->lock);
424 data->noirq = false; 405 data->noirq = false;
425 pcie_pme_clear_status(port); 406 pcie_clear_root_pme_status(port);
426 pcie_pme_interrupt_enable(port, true); 407 pcie_pme_interrupt_enable(port, true);
427 spin_unlock_irq(&data->lock); 408 spin_unlock_irq(&data->lock);
428 409
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 7b5aba0a3291..bd00a01aef14 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,9 +20,6 @@
20 20
21#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 21#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
22 22
23extern bool pcie_ports_disabled;
24extern bool pcie_ports_auto;
25
26extern struct bus_type pcie_port_bus_type; 23extern struct bus_type pcie_port_bus_type;
27extern int pcie_port_device_register(struct pci_dev *dev); 24extern int pcie_port_device_register(struct pci_dev *dev);
28#ifdef CONFIG_PM 25#ifdef CONFIG_PM
@@ -35,6 +32,8 @@ extern void pcie_port_bus_unregister(void);
35 32
36struct pci_dev; 33struct pci_dev;
37 34
35extern void pcie_clear_root_pme_status(struct pci_dev *dev);
36
38#ifdef CONFIG_PCIE_PME 37#ifdef CONFIG_PCIE_PME
39extern bool pcie_pme_msi_disabled; 38extern bool pcie_pme_msi_disabled;
40 39
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index 5982b6a63b89..a86b56e5f2f2 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -33,7 +33,7 @@
33 */ 33 */
34int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) 34int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
35{ 35{
36 acpi_status status; 36 struct acpi_pci_root *root;
37 acpi_handle handle; 37 acpi_handle handle;
38 u32 flags; 38 u32 flags;
39 39
@@ -44,26 +44,11 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
44 if (!handle) 44 if (!handle)
45 return -EINVAL; 45 return -EINVAL;
46 46
47 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 47 root = acpi_pci_find_root(handle);
48 | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 48 if (!root)
49 | OSC_PCI_EXPRESS_PME_CONTROL;
50
51 if (pci_aer_available()) {
52 if (aer_acpi_firmware_first())
53 dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
54 else
55 flags |= OSC_PCI_EXPRESS_AER_CONTROL;
56 }
57
58 status = acpi_pci_osc_control_set(handle, &flags,
59 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
60 if (ACPI_FAILURE(status)) {
61 dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
62 status);
63 return -ENODEV; 49 return -ENODEV;
64 }
65 50
66 dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags); 51 flags = root->osc_control_set;
67 52
68 *srv_mask = PCIE_PORT_SERVICE_VC; 53 *srv_mask = PCIE_PORT_SERVICE_VC;
69 if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) 54 if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index a9c222d79ebc..5130d0d22390 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -241,17 +241,17 @@ static int get_port_device_capability(struct pci_dev *dev)
241 int cap_mask; 241 int cap_mask;
242 int err; 242 int err;
243 243
244 if (pcie_ports_disabled)
245 return 0;
246
244 err = pcie_port_platform_notify(dev, &cap_mask); 247 err = pcie_port_platform_notify(dev, &cap_mask);
245 if (pcie_ports_auto) { 248 if (!pcie_ports_auto) {
246 if (err) {
247 pcie_no_aspm();
248 return 0;
249 }
250 } else {
251 cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP 249 cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
252 | PCIE_PORT_SERVICE_VC; 250 | PCIE_PORT_SERVICE_VC;
253 if (pci_aer_available()) 251 if (pci_aer_available())
254 cap_mask |= PCIE_PORT_SERVICE_AER; 252 cap_mask |= PCIE_PORT_SERVICE_AER;
253 } else if (err) {
254 return 0;
255 } 255 }
256 256
257 pos = pci_pcie_cap(dev); 257 pos = pci_pcie_cap(dev);
@@ -349,15 +349,18 @@ int pcie_port_device_register(struct pci_dev *dev)
349 int status, capabilities, i, nr_service; 349 int status, capabilities, i, nr_service;
350 int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; 350 int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
351 351
352 /* Get and check PCI Express port services */
353 capabilities = get_port_device_capability(dev);
354 if (!capabilities)
355 return -ENODEV;
356
357 /* Enable PCI Express port device */ 352 /* Enable PCI Express port device */
358 status = pci_enable_device(dev); 353 status = pci_enable_device(dev);
359 if (status) 354 if (status)
360 return status; 355 return status;
356
357 /* Get and check PCI Express port services */
358 capabilities = get_port_device_capability(dev);
359 if (!capabilities) {
360 pcie_no_aspm();
361 return 0;
362 }
363
361 pci_set_master(dev); 364 pci_set_master(dev);
362 /* 365 /*
363 * Initialize service irqs. Don't use service devices that 366 * Initialize service irqs. Don't use service devices that
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f9033e190fb6..e0610bda1dea 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -57,6 +57,22 @@ __setup("pcie_ports=", pcie_port_setup);
57 57
58/* global data */ 58/* global data */
59 59
60/**
61 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
62 * @dev: PCIe root port or event collector.
63 */
64void pcie_clear_root_pme_status(struct pci_dev *dev)
65{
66 int rtsta_pos;
67 u32 rtsta;
68
69 rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
70
71 pci_read_config_dword(dev, rtsta_pos, &rtsta);
72 rtsta |= PCI_EXP_RTSTA_PME;
73 pci_write_config_dword(dev, rtsta_pos, rtsta);
74}
75
60static int pcie_portdrv_restore_config(struct pci_dev *dev) 76static int pcie_portdrv_restore_config(struct pci_dev *dev)
61{ 77{
62 int retval; 78 int retval;
@@ -69,6 +85,20 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
69} 85}
70 86
71#ifdef CONFIG_PM 87#ifdef CONFIG_PM
88static int pcie_port_resume_noirq(struct device *dev)
89{
90 struct pci_dev *pdev = to_pci_dev(dev);
91
92 /*
93 * Some BIOSes forget to clear Root PME Status bits after system wakeup
94 * which breaks ACPI-based runtime wakeup on PCI Express, so clear those
95 * bits now just in case (shouldn't hurt).
96 */
97 if(pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
98 pcie_clear_root_pme_status(pdev);
99 return 0;
100}
101
72static const struct dev_pm_ops pcie_portdrv_pm_ops = { 102static const struct dev_pm_ops pcie_portdrv_pm_ops = {
73 .suspend = pcie_port_device_suspend, 103 .suspend = pcie_port_device_suspend,
74 .resume = pcie_port_device_resume, 104 .resume = pcie_port_device_resume,
@@ -76,6 +106,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
76 .thaw = pcie_port_device_resume, 106 .thaw = pcie_port_device_resume,
77 .poweroff = pcie_port_device_suspend, 107 .poweroff = pcie_port_device_suspend,
78 .restore = pcie_port_device_resume, 108 .restore = pcie_port_device_resume,
109 .resume_noirq = pcie_port_resume_noirq,
79}; 110};
80 111
81#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) 112#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
@@ -327,10 +358,8 @@ static int __init pcie_portdrv_init(void)
327{ 358{
328 int retval; 359 int retval;
329 360
330 if (pcie_ports_disabled) { 361 if (pcie_ports_disabled)
331 pcie_no_aspm(); 362 return pci_register_driver(&pcie_portdriver);
332 return -EACCES;
333 }
334 363
335 dmi_check_system(pcie_portdrv_dmi_table); 364 dmi_check_system(pcie_portdrv_dmi_table);
336 365
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index de886f3dfd39..6e318ce41136 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,7 +69,7 @@ comment "PC-card bridges"
69config YENTA 69config YENTA
70 tristate "CardBus yenta-compatible bridge support" 70 tristate "CardBus yenta-compatible bridge support"
71 depends on PCI 71 depends on PCI
72 select CARDBUS if !EMBEDDED 72 select CARDBUS if !EXPERT
73 select PCCARD_NONSTATIC if PCMCIA != n 73 select PCCARD_NONSTATIC if PCMCIA != n
74 ---help--- 74 ---help---
75 This option enables support for CardBus host bridges. Virtually 75 This option enables support for CardBus host bridges. Virtually
@@ -84,27 +84,27 @@ config YENTA
84 84
85config YENTA_O2 85config YENTA_O2
86 default y 86 default y
87 bool "Special initialization for O2Micro bridges" if EMBEDDED 87 bool "Special initialization for O2Micro bridges" if EXPERT
88 depends on YENTA 88 depends on YENTA
89 89
90config YENTA_RICOH 90config YENTA_RICOH
91 default y 91 default y
92 bool "Special initialization for Ricoh bridges" if EMBEDDED 92 bool "Special initialization for Ricoh bridges" if EXPERT
93 depends on YENTA 93 depends on YENTA
94 94
95config YENTA_TI 95config YENTA_TI
96 default y 96 default y
97 bool "Special initialization for TI and EnE bridges" if EMBEDDED 97 bool "Special initialization for TI and EnE bridges" if EXPERT
98 depends on YENTA 98 depends on YENTA
99 99
100config YENTA_ENE_TUNE 100config YENTA_ENE_TUNE
101 default y 101 default y
102 bool "Auto-tune EnE bridges for CB cards" if EMBEDDED 102 bool "Auto-tune EnE bridges for CB cards" if EXPERT
103 depends on YENTA_TI && CARDBUS 103 depends on YENTA_TI && CARDBUS
104 104
105config YENTA_TOSHIBA 105config YENTA_TOSHIBA
106 default y 106 default y
107 bool "Special initialization for Toshiba ToPIC bridges" if EMBEDDED 107 bool "Special initialization for Toshiba ToPIC bridges" if EXPERT
108 depends on YENTA 108 depends on YENTA
109 109
110config PD6729 110config PD6729
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 19e92b2a7f7e..95e3b0948e9c 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -689,7 +689,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
689 if (error) 689 if (error)
690 goto err_free_input_dev; 690 goto err_free_input_dev;
691 691
692 result = acpi_bus_get_power(fujitsu->acpi_handle, &state); 692 result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
693 if (result) { 693 if (result) {
694 printk(KERN_ERR "Error reading power state\n"); 694 printk(KERN_ERR "Error reading power state\n");
695 goto err_unregister_input_dev; 695 goto err_unregister_input_dev;
@@ -857,7 +857,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
857 if (error) 857 if (error)
858 goto err_free_input_dev; 858 goto err_free_input_dev;
859 859
860 result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state); 860 result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
861 if (result) { 861 if (result) {
862 printk(KERN_ERR "Error reading power state\n"); 862 printk(KERN_ERR "Error reading power state\n");
863 goto err_unregister_input_dev; 863 goto err_unregister_input_dev;
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 1752ef006d26..a91d510a798b 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -26,7 +26,6 @@
26#include <linux/sfi.h> 26#include <linux/sfi.h>
27#include <asm/mrst.h> 27#include <asm/mrst.h>
28#include <asm/intel_scu_ipc.h> 28#include <asm/intel_scu_ipc.h>
29#include <asm/mrst.h>
30 29
31/* IPC defines the following message types */ 30/* IPC defines the following message types */
32#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */ 31#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
@@ -161,7 +160,7 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
161{ 160{
162 int i, nc, bytes, d; 161 int i, nc, bytes, d;
163 u32 offset = 0; 162 u32 offset = 0;
164 u32 err = 0; 163 int err;
165 u8 cbuf[IPC_WWBUF_SIZE] = { }; 164 u8 cbuf[IPC_WWBUF_SIZE] = { };
166 u32 *wbuf = (u32 *)&cbuf; 165 u32 *wbuf = (u32 *)&cbuf;
167 166
@@ -404,7 +403,7 @@ EXPORT_SYMBOL(intel_scu_ipc_update_register);
404 */ 403 */
405int intel_scu_ipc_simple_command(int cmd, int sub) 404int intel_scu_ipc_simple_command(int cmd, int sub)
406{ 405{
407 u32 err = 0; 406 int err;
408 407
409 mutex_lock(&ipclock); 408 mutex_lock(&ipclock);
410 if (ipcdev.pdev == NULL) { 409 if (ipcdev.pdev == NULL) {
@@ -434,8 +433,7 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
434int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, 433int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
435 u32 *out, int outlen) 434 u32 *out, int outlen)
436{ 435{
437 u32 err = 0; 436 int i, err;
438 int i = 0;
439 437
440 mutex_lock(&ipclock); 438 mutex_lock(&ipclock);
441 if (ipcdev.pdev == NULL) { 439 if (ipcdev.pdev == NULL) {
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index ba3231d0819e..b93a03259c16 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -128,6 +128,6 @@ static void __exit ipc_module_exit(void)
128module_init(ipc_module_init); 128module_init(ipc_module_init);
129module_exit(ipc_module_exit); 129module_exit(ipc_module_exit);
130 130
131MODULE_LICENSE("GPL V2"); 131MODULE_LICENSE("GPL v2");
132MODULE_DESCRIPTION("Utility driver for intel scu ipc"); 132MODULE_DESCRIPTION("Utility driver for intel scu ipc");
133MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>"); 133MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index 8de3775ec242..bfba893cb321 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -2,11 +2,13 @@
2# Makefile for the Linux Plug-and-Play Support. 2# Makefile for the Linux Plug-and-Play Support.
3# 3#
4 4
5obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o 5obj-y := pnp.o
6
7pnp-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
6 8
7obj-$(CONFIG_PNPACPI) += pnpacpi/ 9obj-$(CONFIG_PNPACPI) += pnpacpi/
8obj-$(CONFIG_PNPBIOS) += pnpbios/ 10obj-$(CONFIG_PNPBIOS) += pnpbios/
9obj-$(CONFIG_ISAPNP) += isapnp/ 11obj-$(CONFIG_ISAPNP) += isapnp/
10 12
11# pnp_system_init goes after pnpacpi/pnpbios init 13# pnp_system_init goes after pnpacpi/pnpbios init
12obj-y += system.o 14pnp-y += system.o
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 0f34d962fd3c..cb6ce42f8e77 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -220,10 +220,5 @@ subsys_initcall(pnp_init);
220int pnp_debug; 220int pnp_debug;
221 221
222#if defined(CONFIG_PNP_DEBUG_MESSAGES) 222#if defined(CONFIG_PNP_DEBUG_MESSAGES)
223static int __init pnp_debug_setup(char *__unused) 223module_param_named(debug, pnp_debug, int, 0644);
224{
225 pnp_debug = 1;
226 return 1;
227}
228__setup("pnp.debug", pnp_debug_setup);
229#endif 224#endif
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index d1dbb9df53fa..00e94032531a 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -189,8 +189,11 @@ static int pnp_bus_resume(struct device *dev)
189 if (!pnp_drv) 189 if (!pnp_drv)
190 return 0; 190 return 0;
191 191
192 if (pnp_dev->protocol->resume) 192 if (pnp_dev->protocol->resume) {
193 pnp_dev->protocol->resume(pnp_dev); 193 error = pnp_dev->protocol->resume(pnp_dev);
194 if (error)
195 return error;
196 }
194 197
195 if (pnp_can_write(pnp_dev)) { 198 if (pnp_can_write(pnp_dev)) {
196 error = pnp_start_dev(pnp_dev); 199 error = pnp_start_dev(pnp_dev);
diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile
index cac18bbfb817..6e607aa33aa3 100644
--- a/drivers/pnp/isapnp/Makefile
+++ b/drivers/pnp/isapnp/Makefile
@@ -1,7 +1,7 @@
1# 1#
2# Makefile for the kernel ISAPNP driver. 2# Makefile for the kernel ISAPNP driver.
3# 3#
4obj-y += pnp.o
5pnp-y := core.o compat.o
4 6
5isapnp-proc-$(CONFIG_PROC_FS) = proc.o 7pnp-$(CONFIG_PROC_FS) += proc.o
6
7obj-y := core.o compat.o $(isapnp-proc-y)
diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile
index 905326fcca85..40c93da18252 100644
--- a/drivers/pnp/pnpacpi/Makefile
+++ b/drivers/pnp/pnpacpi/Makefile
@@ -1,5 +1,6 @@
1# 1#
2# Makefile for the kernel PNPACPI driver. 2# Makefile for the kernel PNPACPI driver.
3# 3#
4obj-y += pnp.o
4 5
5obj-y := core.o rsparser.o 6pnp-y := core.o rsparser.o
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 57313f4658bc..ca84d5099ce7 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -81,12 +81,19 @@ static int pnpacpi_get_resources(struct pnp_dev *dev)
81 81
82static int pnpacpi_set_resources(struct pnp_dev *dev) 82static int pnpacpi_set_resources(struct pnp_dev *dev)
83{ 83{
84 struct acpi_device *acpi_dev = dev->data; 84 struct acpi_device *acpi_dev;
85 acpi_handle handle = acpi_dev->handle; 85 acpi_handle handle;
86 struct acpi_buffer buffer; 86 struct acpi_buffer buffer;
87 int ret; 87 int ret;
88 88
89 pnp_dbg(&dev->dev, "set resources\n"); 89 pnp_dbg(&dev->dev, "set resources\n");
90
91 handle = DEVICE_ACPI_HANDLE(&dev->dev);
92 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
93 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
94 return -ENODEV;
95 }
96
90 ret = pnpacpi_build_resource_template(dev, &buffer); 97 ret = pnpacpi_build_resource_template(dev, &buffer);
91 if (ret) 98 if (ret)
92 return ret; 99 return ret;
@@ -105,12 +112,18 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
105 112
106static int pnpacpi_disable_resources(struct pnp_dev *dev) 113static int pnpacpi_disable_resources(struct pnp_dev *dev)
107{ 114{
108 struct acpi_device *acpi_dev = dev->data; 115 struct acpi_device *acpi_dev;
109 acpi_handle handle = acpi_dev->handle; 116 acpi_handle handle;
110 int ret; 117 int ret;
111 118
112 dev_dbg(&dev->dev, "disable resources\n"); 119 dev_dbg(&dev->dev, "disable resources\n");
113 120
121 handle = DEVICE_ACPI_HANDLE(&dev->dev);
122 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
123 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
124 return 0;
125 }
126
114 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ 127 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
115 ret = 0; 128 ret = 0;
116 if (acpi_bus_power_manageable(handle)) 129 if (acpi_bus_power_manageable(handle))
@@ -124,46 +137,74 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
124#ifdef CONFIG_ACPI_SLEEP 137#ifdef CONFIG_ACPI_SLEEP
125static bool pnpacpi_can_wakeup(struct pnp_dev *dev) 138static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
126{ 139{
127 struct acpi_device *acpi_dev = dev->data; 140 struct acpi_device *acpi_dev;
128 acpi_handle handle = acpi_dev->handle; 141 acpi_handle handle;
142
143 handle = DEVICE_ACPI_HANDLE(&dev->dev);
144 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
145 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
146 return false;
147 }
129 148
130 return acpi_bus_can_wakeup(handle); 149 return acpi_bus_can_wakeup(handle);
131} 150}
132 151
133static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) 152static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
134{ 153{
135 struct acpi_device *acpi_dev = dev->data; 154 struct acpi_device *acpi_dev;
136 acpi_handle handle = acpi_dev->handle; 155 acpi_handle handle;
137 int power_state; 156 int error = 0;
157
158 handle = DEVICE_ACPI_HANDLE(&dev->dev);
159 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
160 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
161 return 0;
162 }
138 163
139 if (device_can_wakeup(&dev->dev)) { 164 if (device_can_wakeup(&dev->dev)) {
140 int rc = acpi_pm_device_sleep_wake(&dev->dev, 165 error = acpi_pm_device_sleep_wake(&dev->dev,
141 device_may_wakeup(&dev->dev)); 166 device_may_wakeup(&dev->dev));
167 if (error)
168 return error;
169 }
170
171 if (acpi_bus_power_manageable(handle)) {
172 int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
173
174 if (power_state < 0)
175 power_state = (state.event == PM_EVENT_ON) ?
176 ACPI_STATE_D0 : ACPI_STATE_D3;
142 177
143 if (rc) 178 /*
144 return rc; 179 * acpi_bus_set_power() often fails (keyboard port can't be
180 * powered-down?), and in any case, our return value is ignored
181 * by pnp_bus_suspend(). Hence we don't revert the wakeup
182 * setting if the set_power fails.
183 */
184 error = acpi_bus_set_power(handle, power_state);
145 } 185 }
146 power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); 186
147 if (power_state < 0) 187 return error;
148 power_state = (state.event == PM_EVENT_ON) ?
149 ACPI_STATE_D0 : ACPI_STATE_D3;
150
151 /* acpi_bus_set_power() often fails (keyboard port can't be
152 * powered-down?), and in any case, our return value is ignored
153 * by pnp_bus_suspend(). Hence we don't revert the wakeup
154 * setting if the set_power fails.
155 */
156 return acpi_bus_set_power(handle, power_state);
157} 188}
158 189
159static int pnpacpi_resume(struct pnp_dev *dev) 190static int pnpacpi_resume(struct pnp_dev *dev)
160{ 191{
161 struct acpi_device *acpi_dev = dev->data; 192 struct acpi_device *acpi_dev;
162 acpi_handle handle = acpi_dev->handle; 193 acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
194 int error = 0;
195
196 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
197 dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
198 return -ENODEV;
199 }
163 200
164 if (device_may_wakeup(&dev->dev)) 201 if (device_may_wakeup(&dev->dev))
165 acpi_pm_device_sleep_wake(&dev->dev, false); 202 acpi_pm_device_sleep_wake(&dev->dev, false);
166 return acpi_bus_set_power(handle, ACPI_STATE_D0); 203
204 if (acpi_bus_power_manageable(handle))
205 error = acpi_bus_set_power(handle, ACPI_STATE_D0);
206
207 return error;
167} 208}
168#endif 209#endif
169 210
diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile
index 3cd3ed760605..240b0ffb83ca 100644
--- a/drivers/pnp/pnpbios/Makefile
+++ b/drivers/pnp/pnpbios/Makefile
@@ -1,7 +1,8 @@
1# 1#
2# Makefile for the kernel PNPBIOS driver. 2# Makefile for the kernel PNPBIOS driver.
3# 3#
4obj-y := pnp.o
4 5
5pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o 6pnp-y := core.o bioscalls.o rsparser.o
6 7
7obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y) 8pnp-$(CONFIG_PNPBIOS_PROC_FS) += proc.o
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 60d83d983a36..61bf5d724139 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -136,6 +136,16 @@ config BATTERY_MAX17040
136 in handheld and portable equipment. The MAX17040 is configured 136 in handheld and portable equipment. The MAX17040 is configured
137 to operate with a single lithium cell 137 to operate with a single lithium cell
138 138
139config BATTERY_MAX17042
140 tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
141 depends on I2C
142 help
143 MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
144 in handheld and portable equipment. The MAX17042 is configured
145 to operate with a single lithium cell. MAX8997 and MAX8966 are
146 multi-function devices that include fuel gauages that are compatible
147 with MAX17042.
148
139config BATTERY_Z2 149config BATTERY_Z2
140 tristate "Z2 battery driver" 150 tristate "Z2 battery driver"
141 depends on I2C && MACH_ZIPIT2 151 depends on I2C && MACH_ZIPIT2
@@ -185,4 +195,14 @@ config CHARGER_TWL4030
185 help 195 help
186 Say Y here to enable support for TWL4030 Battery Charge Interface. 196 Say Y here to enable support for TWL4030 Battery Charge Interface.
187 197
198config CHARGER_GPIO
199 tristate "GPIO charger"
200 depends on GPIOLIB
201 help
202 Say Y to include support for chargers which report their online status
203 through a GPIO pin.
204
205 This driver can be build as a module. If so, the module will be
206 called gpio-charger.
207
188endif # POWER_SUPPLY 208endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index c75772eb157c..8385bfae8728 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_BATTERY_BQ20Z75) += bq20z75.o
25obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o 25obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
26obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o 26obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
27obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o 27obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
28obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
28obj-$(CONFIG_BATTERY_Z2) += z2_battery.o 29obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
29obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o 30obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
30obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o 31obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
@@ -32,3 +33,4 @@ obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
32obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o 33obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
33obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o 34obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
34obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o 35obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
36obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 039f41ae217d..548d263b1ad0 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -295,7 +295,7 @@ static struct {
295static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state) 295static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
296{ 296{
297 /* flush all pending status updates */ 297 /* flush all pending status updates */
298 flush_scheduled_work(); 298 flush_work_sync(&bat_work);
299 return 0; 299 return 0;
300} 300}
301 301
@@ -362,7 +362,7 @@ err_psy_reg_bu:
362err_psy_reg_main: 362err_psy_reg_main:
363 363
364 /* see comment in collie_bat_remove */ 364 /* see comment in collie_bat_remove */
365 flush_scheduled_work(); 365 cancel_work_sync(&bat_work);
366 366
367 i--; 367 i--;
368err_gpio: 368err_gpio:
@@ -382,12 +382,11 @@ static void __devexit collie_bat_remove(struct ucb1x00_dev *dev)
382 power_supply_unregister(&collie_bat_main.psy); 382 power_supply_unregister(&collie_bat_main.psy);
383 383
384 /* 384 /*
385 * now flush all pending work. 385 * Now cancel the bat_work. We won't get any more schedules,
386 * we won't get any more schedules, since all 386 * since all sources (isr and external_power_changed) are
387 * sources (isr and external_power_changed) 387 * unregistered now.
388 * are unregistered now.
389 */ 388 */
390 flush_scheduled_work(); 389 cancel_work_sync(&bat_work);
391 390
392 for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--) 391 for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
393 gpio_free(gpios[i].gpio); 392 gpio_free(gpios[i].gpio);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index e7f89785beef..e534290f3256 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -212,7 +212,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
212 if (di->rem_capacity > 100) 212 if (di->rem_capacity > 100)
213 di->rem_capacity = 100; 213 di->rem_capacity = 100;
214 214
215 if (di->current_uA >= 100L) 215 if (di->current_uA < -100L)
216 di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L) 216 di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
217 / (di->current_uA / 100L); 217 / (di->current_uA / 100L);
218 else 218 else
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
new file mode 100644
index 000000000000..25b88ac1d44c
--- /dev/null
+++ b/drivers/power/gpio-charger.c
@@ -0,0 +1,188 @@
1/*
2 * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
3 * Driver for chargers which report their online status through a GPIO pin
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 *
14 */
15
16#include <linux/device.h>
17#include <linux/gpio.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/power_supply.h>
24#include <linux/slab.h>
25
26#include <linux/power/gpio-charger.h>
27
28struct gpio_charger {
29 const struct gpio_charger_platform_data *pdata;
30 unsigned int irq;
31
32 struct power_supply charger;
33};
34
35static irqreturn_t gpio_charger_irq(int irq, void *devid)
36{
37 struct power_supply *charger = devid;
38
39 power_supply_changed(charger);
40
41 return IRQ_HANDLED;
42}
43
44static inline struct gpio_charger *psy_to_gpio_charger(struct power_supply *psy)
45{
46 return container_of(psy, struct gpio_charger, charger);
47}
48
49static int gpio_charger_get_property(struct power_supply *psy,
50 enum power_supply_property psp, union power_supply_propval *val)
51{
52 struct gpio_charger *gpio_charger = psy_to_gpio_charger(psy);
53 const struct gpio_charger_platform_data *pdata = gpio_charger->pdata;
54
55 switch (psp) {
56 case POWER_SUPPLY_PROP_ONLINE:
57 val->intval = gpio_get_value(pdata->gpio);
58 val->intval ^= pdata->gpio_active_low;
59 break;
60 default:
61 return -EINVAL;
62 }
63
64 return 0;
65}
66
67static enum power_supply_property gpio_charger_properties[] = {
68 POWER_SUPPLY_PROP_ONLINE,
69};
70
71static int __devinit gpio_charger_probe(struct platform_device *pdev)
72{
73 const struct gpio_charger_platform_data *pdata = pdev->dev.platform_data;
74 struct gpio_charger *gpio_charger;
75 struct power_supply *charger;
76 int ret;
77 int irq;
78
79 if (!pdata) {
80 dev_err(&pdev->dev, "No platform data\n");
81 return -EINVAL;
82 }
83
84 if (!gpio_is_valid(pdata->gpio)) {
85 dev_err(&pdev->dev, "Invalid gpio pin\n");
86 return -EINVAL;
87 }
88
89 gpio_charger = kzalloc(sizeof(*gpio_charger), GFP_KERNEL);
90 if (!gpio_charger) {
91 dev_err(&pdev->dev, "Failed to alloc driver structure\n");
92 return -ENOMEM;
93 }
94
95 charger = &gpio_charger->charger;
96
97 charger->name = pdata->name ? pdata->name : "gpio-charger";
98 charger->type = pdata->type;
99 charger->properties = gpio_charger_properties;
100 charger->num_properties = ARRAY_SIZE(gpio_charger_properties);
101 charger->get_property = gpio_charger_get_property;
102 charger->supplied_to = pdata->supplied_to;
103 charger->num_supplicants = pdata->num_supplicants;
104
105 ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
106 if (ret) {
107 dev_err(&pdev->dev, "Failed to request gpio pin: %d\n", ret);
108 goto err_free;
109 }
110 ret = gpio_direction_input(pdata->gpio);
111 if (ret) {
112 dev_err(&pdev->dev, "Failed to set gpio to input: %d\n", ret);
113 goto err_gpio_free;
114 }
115
116 gpio_charger->pdata = pdata;
117
118 ret = power_supply_register(&pdev->dev, charger);
119 if (ret < 0) {
120 dev_err(&pdev->dev, "Failed to register power supply: %d\n",
121 ret);
122 goto err_gpio_free;
123 }
124
125 irq = gpio_to_irq(pdata->gpio);
126 if (irq > 0) {
127 ret = request_any_context_irq(irq, gpio_charger_irq,
128 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
129 dev_name(&pdev->dev), charger);
130 if (ret)
131 dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret);
132 else
133 gpio_charger->irq = irq;
134 }
135
136 platform_set_drvdata(pdev, gpio_charger);
137
138 return 0;
139
140err_gpio_free:
141 gpio_free(pdata->gpio);
142err_free:
143 kfree(gpio_charger);
144 return ret;
145}
146
147static int __devexit gpio_charger_remove(struct platform_device *pdev)
148{
149 struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
150
151 if (gpio_charger->irq)
152 free_irq(gpio_charger->irq, &gpio_charger->charger);
153
154 power_supply_unregister(&gpio_charger->charger);
155
156 gpio_free(gpio_charger->pdata->gpio);
157
158 platform_set_drvdata(pdev, NULL);
159 kfree(gpio_charger);
160
161 return 0;
162}
163
164static struct platform_driver gpio_charger_driver = {
165 .probe = gpio_charger_probe,
166 .remove = __devexit_p(gpio_charger_remove),
167 .driver = {
168 .name = "gpio-charger",
169 .owner = THIS_MODULE,
170 },
171};
172
173static int __init gpio_charger_init(void)
174{
175 return platform_driver_register(&gpio_charger_driver);
176}
177module_init(gpio_charger_init);
178
179static void __exit gpio_charger_exit(void)
180{
181 platform_driver_unregister(&gpio_charger_driver);
182}
183module_exit(gpio_charger_exit);
184
185MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
186MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO");
187MODULE_LICENSE("GPL");
188MODULE_ALIAS("platform:gpio-charger");
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index 36cf402c0677..bce3a01da2f0 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -765,7 +765,7 @@ static int __devexit platform_pmic_battery_remove(struct platform_device *pdev)
765 power_supply_unregister(&pbi->usb); 765 power_supply_unregister(&pbi->usb);
766 power_supply_unregister(&pbi->batt); 766 power_supply_unregister(&pbi->batt);
767 767
768 flush_scheduled_work(); 768 cancel_work_sync(&pbi->handler);
769 kfree(pbi); 769 kfree(pbi);
770 return 0; 770 return 0;
771} 771}
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 72512185f3e2..2ad9b14a5ce3 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -59,11 +59,61 @@ struct isp1704_charger {
59 struct notifier_block nb; 59 struct notifier_block nb;
60 struct work_struct work; 60 struct work_struct work;
61 61
62 char model[7]; 62 /* properties */
63 char model[8];
63 unsigned present:1; 64 unsigned present:1;
65 unsigned online:1;
66 unsigned current_max;
67
68 /* temp storage variables */
69 unsigned long event;
70 unsigned max_power;
64}; 71};
65 72
66/* 73/*
74 * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB
75 * chargers).
76 *
77 * REVISIT: The method is defined in Battery Charging Specification and is
78 * applicable to any ULPI transceiver. Nothing isp170x specific here.
79 */
80static inline int isp1704_charger_type(struct isp1704_charger *isp)
81{
82 u8 reg;
83 u8 func_ctrl;
84 u8 otg_ctrl;
85 int type = POWER_SUPPLY_TYPE_USB_DCP;
86
87 func_ctrl = otg_io_read(isp->otg, ULPI_FUNC_CTRL);
88 otg_ctrl = otg_io_read(isp->otg, ULPI_OTG_CTRL);
89
90 /* disable pulldowns */
91 reg = ULPI_OTG_CTRL_DM_PULLDOWN | ULPI_OTG_CTRL_DP_PULLDOWN;
92 otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), reg);
93
94 /* full speed */
95 otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
96 ULPI_FUNC_CTRL_XCVRSEL_MASK);
97 otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL),
98 ULPI_FUNC_CTRL_FULL_SPEED);
99
100 /* Enable strong pull-up on DP (1.5K) and reset */
101 reg = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET;
102 otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), reg);
103 usleep_range(1000, 2000);
104
105 reg = otg_io_read(isp->otg, ULPI_DEBUG);
106 if ((reg & 3) != 3)
107 type = POWER_SUPPLY_TYPE_USB_CDP;
108
109 /* recover original state */
110 otg_io_write(isp->otg, ULPI_FUNC_CTRL, func_ctrl);
111 otg_io_write(isp->otg, ULPI_OTG_CTRL, otg_ctrl);
112
113 return type;
114}
115
116/*
67 * ISP1704 detects PS/2 adapters as charger. To make sure the detected charger 117 * ISP1704 detects PS/2 adapters as charger. To make sure the detected charger
68 * is actually a dedicated charger, the following steps need to be taken. 118 * is actually a dedicated charger, the following steps need to be taken.
69 */ 119 */
@@ -127,16 +177,19 @@ static inline int isp1704_charger_verify(struct isp1704_charger *isp)
127static inline int isp1704_charger_detect(struct isp1704_charger *isp) 177static inline int isp1704_charger_detect(struct isp1704_charger *isp)
128{ 178{
129 unsigned long timeout; 179 unsigned long timeout;
130 u8 r; 180 u8 pwr_ctrl;
131 int ret = 0; 181 int ret = 0;
132 182
183 pwr_ctrl = otg_io_read(isp->otg, ISP1704_PWR_CTRL);
184
133 /* set SW control bit in PWR_CTRL register */ 185 /* set SW control bit in PWR_CTRL register */
134 otg_io_write(isp->otg, ISP1704_PWR_CTRL, 186 otg_io_write(isp->otg, ISP1704_PWR_CTRL,
135 ISP1704_PWR_CTRL_SWCTRL); 187 ISP1704_PWR_CTRL_SWCTRL);
136 188
137 /* enable manual charger detection */ 189 /* enable manual charger detection */
138 r = (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN); 190 otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL),
139 otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), r); 191 ISP1704_PWR_CTRL_SWCTRL
192 | ISP1704_PWR_CTRL_DPVSRC_EN);
140 usleep_range(1000, 2000); 193 usleep_range(1000, 2000);
141 194
142 timeout = jiffies + msecs_to_jiffies(300); 195 timeout = jiffies + msecs_to_jiffies(300);
@@ -147,7 +200,10 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp)
147 ret = isp1704_charger_verify(isp); 200 ret = isp1704_charger_verify(isp);
148 break; 201 break;
149 } 202 }
150 } while (!time_after(jiffies, timeout)); 203 } while (!time_after(jiffies, timeout) && isp->online);
204
205 /* recover original state */
206 otg_io_write(isp->otg, ISP1704_PWR_CTRL, pwr_ctrl);
151 207
152 return ret; 208 return ret;
153} 209}
@@ -155,52 +211,92 @@ static inline int isp1704_charger_detect(struct isp1704_charger *isp)
155static void isp1704_charger_work(struct work_struct *data) 211static void isp1704_charger_work(struct work_struct *data)
156{ 212{
157 int detect; 213 int detect;
214 unsigned long event;
215 unsigned power;
158 struct isp1704_charger *isp = 216 struct isp1704_charger *isp =
159 container_of(data, struct isp1704_charger, work); 217 container_of(data, struct isp1704_charger, work);
218 static DEFINE_MUTEX(lock);
160 219
161 /* 220 event = isp->event;
162 * FIXME Only supporting dedicated chargers even though isp1704 can 221 power = isp->max_power;
163 * detect HUB and HOST chargers. If the device has already been
164 * enumerated, the detection will break the connection.
165 */
166 if (isp->otg->state != OTG_STATE_B_IDLE)
167 return;
168 222
169 /* disable data pullups */ 223 mutex_lock(&lock);
170 if (isp->otg->gadget) 224
171 usb_gadget_disconnect(isp->otg->gadget); 225 switch (event) {
226 case USB_EVENT_VBUS:
227 isp->online = true;
228
229 /* detect charger */
230 detect = isp1704_charger_detect(isp);
231
232 if (detect) {
233 isp->present = detect;
234 isp->psy.type = isp1704_charger_type(isp);
235 }
172 236
173 /* detect charger */ 237 switch (isp->psy.type) {
174 detect = isp1704_charger_detect(isp); 238 case POWER_SUPPLY_TYPE_USB_DCP:
175 if (detect) { 239 isp->current_max = 1800;
176 isp->present = detect; 240 break;
177 power_supply_changed(&isp->psy); 241 case POWER_SUPPLY_TYPE_USB_CDP:
242 /*
243 * Only 500mA here or high speed chirp
244 * handshaking may break
245 */
246 isp->current_max = 500;
247 /* FALLTHROUGH */
248 case POWER_SUPPLY_TYPE_USB:
249 default:
250 /* enable data pullups */
251 if (isp->otg->gadget)
252 usb_gadget_connect(isp->otg->gadget);
253 }
254 break;
255 case USB_EVENT_NONE:
256 isp->online = false;
257 isp->current_max = 0;
258 isp->present = 0;
259 isp->current_max = 0;
260 isp->psy.type = POWER_SUPPLY_TYPE_USB;
261
262 /*
263 * Disable data pullups. We need to prevent the controller from
264 * enumerating.
265 *
266 * FIXME: This is here to allow charger detection with Host/HUB
267 * chargers. The pullups may be enabled elsewhere, so this can
268 * not be the final solution.
269 */
270 if (isp->otg->gadget)
271 usb_gadget_disconnect(isp->otg->gadget);
272 break;
273 case USB_EVENT_ENUMERATED:
274 if (isp->present)
275 isp->current_max = 1800;
276 else
277 isp->current_max = power;
278 break;
279 default:
280 goto out;
178 } 281 }
179 282
180 /* enable data pullups */ 283 power_supply_changed(&isp->psy);
181 if (isp->otg->gadget) 284out:
182 usb_gadget_connect(isp->otg->gadget); 285 mutex_unlock(&lock);
183} 286}
184 287
185static int isp1704_notifier_call(struct notifier_block *nb, 288static int isp1704_notifier_call(struct notifier_block *nb,
186 unsigned long event, void *unused) 289 unsigned long event, void *power)
187{ 290{
188 struct isp1704_charger *isp = 291 struct isp1704_charger *isp =
189 container_of(nb, struct isp1704_charger, nb); 292 container_of(nb, struct isp1704_charger, nb);
190 293
191 switch (event) { 294 isp->event = event;
192 case USB_EVENT_VBUS: 295
193 schedule_work(&isp->work); 296 if (power)
194 break; 297 isp->max_power = *((unsigned *)power);
195 case USB_EVENT_NONE: 298
196 if (isp->present) { 299 schedule_work(&isp->work);
197 isp->present = 0;
198 power_supply_changed(&isp->psy);
199 }
200 break;
201 default:
202 return NOTIFY_DONE;
203 }
204 300
205 return NOTIFY_OK; 301 return NOTIFY_OK;
206} 302}
@@ -216,6 +312,12 @@ static int isp1704_charger_get_property(struct power_supply *psy,
216 case POWER_SUPPLY_PROP_PRESENT: 312 case POWER_SUPPLY_PROP_PRESENT:
217 val->intval = isp->present; 313 val->intval = isp->present;
218 break; 314 break;
315 case POWER_SUPPLY_PROP_ONLINE:
316 val->intval = isp->online;
317 break;
318 case POWER_SUPPLY_PROP_CURRENT_MAX:
319 val->intval = isp->current_max;
320 break;
219 case POWER_SUPPLY_PROP_MODEL_NAME: 321 case POWER_SUPPLY_PROP_MODEL_NAME:
220 val->strval = isp->model; 322 val->strval = isp->model;
221 break; 323 break;
@@ -230,6 +332,8 @@ static int isp1704_charger_get_property(struct power_supply *psy,
230 332
231static enum power_supply_property power_props[] = { 333static enum power_supply_property power_props[] = {
232 POWER_SUPPLY_PROP_PRESENT, 334 POWER_SUPPLY_PROP_PRESENT,
335 POWER_SUPPLY_PROP_ONLINE,
336 POWER_SUPPLY_PROP_CURRENT_MAX,
233 POWER_SUPPLY_PROP_MODEL_NAME, 337 POWER_SUPPLY_PROP_MODEL_NAME,
234 POWER_SUPPLY_PROP_MANUFACTURER, 338 POWER_SUPPLY_PROP_MANUFACTURER,
235}; 339};
@@ -287,13 +391,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
287 if (!isp->otg) 391 if (!isp->otg)
288 goto fail0; 392 goto fail0;
289 393
394 isp->dev = &pdev->dev;
395 platform_set_drvdata(pdev, isp);
396
290 ret = isp1704_test_ulpi(isp); 397 ret = isp1704_test_ulpi(isp);
291 if (ret < 0) 398 if (ret < 0)
292 goto fail1; 399 goto fail1;
293 400
294 isp->dev = &pdev->dev;
295 platform_set_drvdata(pdev, isp);
296
297 isp->psy.name = "isp1704"; 401 isp->psy.name = "isp1704";
298 isp->psy.type = POWER_SUPPLY_TYPE_USB; 402 isp->psy.type = POWER_SUPPLY_TYPE_USB;
299 isp->psy.properties = power_props; 403 isp->psy.properties = power_props;
@@ -318,6 +422,23 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
318 422
319 dev_info(isp->dev, "registered with product id %s\n", isp->model); 423 dev_info(isp->dev, "registered with product id %s\n", isp->model);
320 424
425 /*
426 * Taking over the D+ pullup.
427 *
428 * FIXME: The device will be disconnected if it was already
429 * enumerated. The charger driver should be always loaded before any
430 * gadget is loaded.
431 */
432 if (isp->otg->gadget)
433 usb_gadget_disconnect(isp->otg->gadget);
434
435 /* Detect charger if VBUS is valid (the cable was already plugged). */
436 ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
437 if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
438 isp->event = USB_EVENT_VBUS;
439 schedule_work(&isp->work);
440 }
441
321 return 0; 442 return 0;
322fail2: 443fail2:
323 power_supply_unregister(&isp->psy); 444 power_supply_unregister(&isp->psy);
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index a8108a73593e..02414db6a94c 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/io.h>
22 23
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/gpio.h> 25#include <linux/gpio.h>
@@ -47,6 +48,8 @@ struct jz_battery {
47 48
48 struct power_supply battery; 49 struct power_supply battery;
49 struct delayed_work work; 50 struct delayed_work work;
51
52 struct mutex lock;
50}; 53};
51 54
52static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy) 55static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy)
@@ -68,6 +71,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
68 unsigned long val; 71 unsigned long val;
69 long voltage; 72 long voltage;
70 73
74 mutex_lock(&battery->lock);
75
71 INIT_COMPLETION(battery->read_completion); 76 INIT_COMPLETION(battery->read_completion);
72 77
73 enable_irq(battery->irq); 78 enable_irq(battery->irq);
@@ -91,6 +96,8 @@ static long jz_battery_read_voltage(struct jz_battery *battery)
91 battery->cell->disable(battery->pdev); 96 battery->cell->disable(battery->pdev);
92 disable_irq(battery->irq); 97 disable_irq(battery->irq);
93 98
99 mutex_unlock(&battery->lock);
100
94 return voltage; 101 return voltage;
95} 102}
96 103
@@ -240,6 +247,11 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
240 struct jz_battery *jz_battery; 247 struct jz_battery *jz_battery;
241 struct power_supply *battery; 248 struct power_supply *battery;
242 249
250 if (!pdata) {
251 dev_err(&pdev->dev, "No platform_data supplied\n");
252 return -ENXIO;
253 }
254
243 jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL); 255 jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL);
244 if (!jz_battery) { 256 if (!jz_battery) {
245 dev_err(&pdev->dev, "Failed to allocate driver structure\n"); 257 dev_err(&pdev->dev, "Failed to allocate driver structure\n");
@@ -291,6 +303,7 @@ static int __devinit jz_battery_probe(struct platform_device *pdev)
291 jz_battery->pdev = pdev; 303 jz_battery->pdev = pdev;
292 304
293 init_completion(&jz_battery->read_completion); 305 init_completion(&jz_battery->read_completion);
306 mutex_init(&jz_battery->lock);
294 307
295 INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work); 308 INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work);
296 309
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
new file mode 100644
index 000000000000..c5c8805156cb
--- /dev/null
+++ b/drivers/power/max17042_battery.c
@@ -0,0 +1,239 @@
1/*
2 * Fuel gauge driver for Maxim 17042 / 8966 / 8997
3 * Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
4 *
5 * Copyright (C) 2011 Samsung Electronics
6 * MyungJoo Ham <myungjoo.ham@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * This driver is based on max17040_battery.c
23 */
24
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/i2c.h>
28#include <linux/mod_devicetable.h>
29#include <linux/power_supply.h>
30#include <linux/power/max17042_battery.h>
31
32enum max17042_register {
33 MAX17042_STATUS = 0x00,
34 MAX17042_VALRT_Th = 0x01,
35 MAX17042_TALRT_Th = 0x02,
36 MAX17042_SALRT_Th = 0x03,
37 MAX17042_AtRate = 0x04,
38 MAX17042_RepCap = 0x05,
39 MAX17042_RepSOC = 0x06,
40 MAX17042_Age = 0x07,
41 MAX17042_TEMP = 0x08,
42 MAX17042_VCELL = 0x09,
43 MAX17042_Current = 0x0A,
44 MAX17042_AvgCurrent = 0x0B,
45 MAX17042_Qresidual = 0x0C,
46 MAX17042_SOC = 0x0D,
47 MAX17042_AvSOC = 0x0E,
48 MAX17042_RemCap = 0x0F,
49 MAX17402_FullCAP = 0x10,
50 MAX17042_TTE = 0x11,
51 MAX17042_V_empty = 0x12,
52
53 MAX17042_RSLOW = 0x14,
54
55 MAX17042_AvgTA = 0x16,
56 MAX17042_Cycles = 0x17,
57 MAX17042_DesignCap = 0x18,
58 MAX17042_AvgVCELL = 0x19,
59 MAX17042_MinMaxTemp = 0x1A,
60 MAX17042_MinMaxVolt = 0x1B,
61 MAX17042_MinMaxCurr = 0x1C,
62 MAX17042_CONFIG = 0x1D,
63 MAX17042_ICHGTerm = 0x1E,
64 MAX17042_AvCap = 0x1F,
65 MAX17042_ManName = 0x20,
66 MAX17042_DevName = 0x21,
67 MAX17042_DevChem = 0x22,
68
69 MAX17042_TempNom = 0x24,
70 MAX17042_TempCold = 0x25,
71 MAX17042_TempHot = 0x26,
72 MAX17042_AIN = 0x27,
73 MAX17042_LearnCFG = 0x28,
74 MAX17042_SHFTCFG = 0x29,
75 MAX17042_RelaxCFG = 0x2A,
76 MAX17042_MiscCFG = 0x2B,
77 MAX17042_TGAIN = 0x2C,
78 MAx17042_TOFF = 0x2D,
79 MAX17042_CGAIN = 0x2E,
80 MAX17042_COFF = 0x2F,
81
82 MAX17042_Q_empty = 0x33,
83 MAX17042_T_empty = 0x34,
84
85 MAX17042_RCOMP0 = 0x38,
86 MAX17042_TempCo = 0x39,
87 MAX17042_Rx = 0x3A,
88 MAX17042_T_empty0 = 0x3B,
89 MAX17042_TaskPeriod = 0x3C,
90 MAX17042_FSTAT = 0x3D,
91
92 MAX17042_SHDNTIMER = 0x3F,
93
94 MAX17042_VFRemCap = 0x4A,
95
96 MAX17042_QH = 0x4D,
97 MAX17042_QL = 0x4E,
98};
99
100struct max17042_chip {
101 struct i2c_client *client;
102 struct power_supply battery;
103 struct max17042_platform_data *pdata;
104};
105
106static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
107{
108 int ret = i2c_smbus_write_word_data(client, reg, value);
109
110 if (ret < 0)
111 dev_err(&client->dev, "%s: err %d\n", __func__, ret);
112
113 return ret;
114}
115
116static int max17042_read_reg(struct i2c_client *client, u8 reg)
117{
118 int ret = i2c_smbus_read_word_data(client, reg);
119
120 if (ret < 0)
121 dev_err(&client->dev, "%s: err %d\n", __func__, ret);
122
123 return ret;
124}
125
126static enum power_supply_property max17042_battery_props[] = {
127 POWER_SUPPLY_PROP_VOLTAGE_NOW,
128 POWER_SUPPLY_PROP_VOLTAGE_AVG,
129 POWER_SUPPLY_PROP_CAPACITY,
130};
131
132static int max17042_get_property(struct power_supply *psy,
133 enum power_supply_property psp,
134 union power_supply_propval *val)
135{
136 struct max17042_chip *chip = container_of(psy,
137 struct max17042_chip, battery);
138
139 switch (psp) {
140 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
141 val->intval = max17042_read_reg(chip->client,
142 MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
143 break;
144 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
145 val->intval = max17042_read_reg(chip->client,
146 MAX17042_AvgVCELL) * 83;
147 break;
148 case POWER_SUPPLY_PROP_CAPACITY:
149 val->intval = max17042_read_reg(chip->client,
150 MAX17042_SOC) / 256;
151 break;
152 default:
153 return -EINVAL;
154 }
155 return 0;
156}
157
158static int __devinit max17042_probe(struct i2c_client *client,
159 const struct i2c_device_id *id)
160{
161 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
162 struct max17042_chip *chip;
163 int ret;
164
165 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
166 return -EIO;
167
168 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
169 if (!chip)
170 return -ENOMEM;
171
172 chip->client = client;
173 chip->pdata = client->dev.platform_data;
174
175 i2c_set_clientdata(client, chip);
176
177 chip->battery.name = "max17042_battery";
178 chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
179 chip->battery.get_property = max17042_get_property;
180 chip->battery.properties = max17042_battery_props;
181 chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props);
182
183 ret = power_supply_register(&client->dev, &chip->battery);
184 if (ret) {
185 dev_err(&client->dev, "failed: power supply register\n");
186 i2c_set_clientdata(client, NULL);
187 kfree(chip);
188 return ret;
189 }
190
191 if (!chip->pdata->enable_current_sense) {
192 max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
193 max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
194 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
195 }
196
197 return 0;
198}
199
200static int __devexit max17042_remove(struct i2c_client *client)
201{
202 struct max17042_chip *chip = i2c_get_clientdata(client);
203
204 power_supply_unregister(&chip->battery);
205 i2c_set_clientdata(client, NULL);
206 kfree(chip);
207 return 0;
208}
209
210static const struct i2c_device_id max17042_id[] = {
211 { "max17042", 0 },
212 { }
213};
214MODULE_DEVICE_TABLE(i2c, max17042_id);
215
216static struct i2c_driver max17042_i2c_driver = {
217 .driver = {
218 .name = "max17042",
219 },
220 .probe = max17042_probe,
221 .remove = __devexit_p(max17042_remove),
222 .id_table = max17042_id,
223};
224
225static int __init max17042_init(void)
226{
227 return i2c_add_driver(&max17042_i2c_driver);
228}
229module_init(max17042_init);
230
231static void __exit max17042_exit(void)
232{
233 i2c_del_driver(&max17042_i2c_driver);
234}
235module_exit(max17042_exit);
236
237MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
238MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
239MODULE_LICENSE("GPL");
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 5bc1dcf7785e..0b0ff3a936a6 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -201,6 +201,72 @@ static int olpc_bat_get_tech(union power_supply_propval *val)
201 return ret; 201 return ret;
202} 202}
203 203
204static int olpc_bat_get_charge_full_design(union power_supply_propval *val)
205{
206 uint8_t ec_byte;
207 union power_supply_propval tech;
208 int ret, mfr;
209
210 ret = olpc_bat_get_tech(&tech);
211 if (ret)
212 return ret;
213
214 ec_byte = BAT_ADDR_MFR_TYPE;
215 ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
216 if (ret)
217 return ret;
218
219 mfr = ec_byte >> 4;
220
221 switch (tech.intval) {
222 case POWER_SUPPLY_TECHNOLOGY_NiMH:
223 switch (mfr) {
224 case 1: /* Gold Peak */
225 val->intval = 3000000*.8;
226 break;
227 default:
228 return -EIO;
229 }
230 break;
231
232 case POWER_SUPPLY_TECHNOLOGY_LiFe:
233 switch (mfr) {
234 case 1: /* Gold Peak */
235 val->intval = 2800000;
236 break;
237 case 2: /* BYD */
238 val->intval = 3100000;
239 break;
240 default:
241 return -EIO;
242 }
243 break;
244
245 default:
246 return -EIO;
247 }
248
249 return ret;
250}
251
252static int olpc_bat_get_charge_now(union power_supply_propval *val)
253{
254 uint8_t soc;
255 union power_supply_propval full;
256 int ret;
257
258 ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &soc, 1);
259 if (ret)
260 return ret;
261
262 ret = olpc_bat_get_charge_full_design(&full);
263 if (ret)
264 return ret;
265
266 val->intval = soc * (full.intval / 100);
267 return 0;
268}
269
204/********************************************************************* 270/*********************************************************************
205 * Battery properties 271 * Battery properties
206 *********************************************************************/ 272 *********************************************************************/
@@ -267,6 +333,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
267 return ret; 333 return ret;
268 break; 334 break;
269 case POWER_SUPPLY_PROP_VOLTAGE_AVG: 335 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
336 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
270 ret = olpc_ec_cmd(EC_BAT_VOLTAGE, NULL, 0, (void *)&ec_word, 2); 337 ret = olpc_ec_cmd(EC_BAT_VOLTAGE, NULL, 0, (void *)&ec_word, 2);
271 if (ret) 338 if (ret)
272 return ret; 339 return ret;
@@ -274,6 +341,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
274 val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32; 341 val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
275 break; 342 break;
276 case POWER_SUPPLY_PROP_CURRENT_AVG: 343 case POWER_SUPPLY_PROP_CURRENT_AVG:
344 case POWER_SUPPLY_PROP_CURRENT_NOW:
277 ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2); 345 ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
278 if (ret) 346 if (ret)
279 return ret; 347 return ret;
@@ -294,6 +362,16 @@ static int olpc_bat_get_property(struct power_supply *psy,
294 else 362 else
295 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; 363 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
296 break; 364 break;
365 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
366 ret = olpc_bat_get_charge_full_design(val);
367 if (ret)
368 return ret;
369 break;
370 case POWER_SUPPLY_PROP_CHARGE_NOW:
371 ret = olpc_bat_get_charge_now(val);
372 if (ret)
373 return ret;
374 break;
297 case POWER_SUPPLY_PROP_TEMP: 375 case POWER_SUPPLY_PROP_TEMP:
298 ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2); 376 ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2);
299 if (ret) 377 if (ret)
@@ -331,16 +409,20 @@ static int olpc_bat_get_property(struct power_supply *psy,
331 return ret; 409 return ret;
332} 410}
333 411
334static enum power_supply_property olpc_bat_props[] = { 412static enum power_supply_property olpc_xo1_bat_props[] = {
335 POWER_SUPPLY_PROP_STATUS, 413 POWER_SUPPLY_PROP_STATUS,
336 POWER_SUPPLY_PROP_CHARGE_TYPE, 414 POWER_SUPPLY_PROP_CHARGE_TYPE,
337 POWER_SUPPLY_PROP_PRESENT, 415 POWER_SUPPLY_PROP_PRESENT,
338 POWER_SUPPLY_PROP_HEALTH, 416 POWER_SUPPLY_PROP_HEALTH,
339 POWER_SUPPLY_PROP_TECHNOLOGY, 417 POWER_SUPPLY_PROP_TECHNOLOGY,
340 POWER_SUPPLY_PROP_VOLTAGE_AVG, 418 POWER_SUPPLY_PROP_VOLTAGE_AVG,
419 POWER_SUPPLY_PROP_VOLTAGE_NOW,
341 POWER_SUPPLY_PROP_CURRENT_AVG, 420 POWER_SUPPLY_PROP_CURRENT_AVG,
421 POWER_SUPPLY_PROP_CURRENT_NOW,
342 POWER_SUPPLY_PROP_CAPACITY, 422 POWER_SUPPLY_PROP_CAPACITY,
343 POWER_SUPPLY_PROP_CAPACITY_LEVEL, 423 POWER_SUPPLY_PROP_CAPACITY_LEVEL,
424 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
425 POWER_SUPPLY_PROP_CHARGE_NOW,
344 POWER_SUPPLY_PROP_TEMP, 426 POWER_SUPPLY_PROP_TEMP,
345 POWER_SUPPLY_PROP_TEMP_AMBIENT, 427 POWER_SUPPLY_PROP_TEMP_AMBIENT,
346 POWER_SUPPLY_PROP_MANUFACTURER, 428 POWER_SUPPLY_PROP_MANUFACTURER,
@@ -348,6 +430,27 @@ static enum power_supply_property olpc_bat_props[] = {
348 POWER_SUPPLY_PROP_CHARGE_COUNTER, 430 POWER_SUPPLY_PROP_CHARGE_COUNTER,
349}; 431};
350 432
433/* XO-1.5 does not have ambient temperature property */
434static enum power_supply_property olpc_xo15_bat_props[] = {
435 POWER_SUPPLY_PROP_STATUS,
436 POWER_SUPPLY_PROP_CHARGE_TYPE,
437 POWER_SUPPLY_PROP_PRESENT,
438 POWER_SUPPLY_PROP_HEALTH,
439 POWER_SUPPLY_PROP_TECHNOLOGY,
440 POWER_SUPPLY_PROP_VOLTAGE_AVG,
441 POWER_SUPPLY_PROP_VOLTAGE_NOW,
442 POWER_SUPPLY_PROP_CURRENT_AVG,
443 POWER_SUPPLY_PROP_CURRENT_NOW,
444 POWER_SUPPLY_PROP_CAPACITY,
445 POWER_SUPPLY_PROP_CAPACITY_LEVEL,
446 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
447 POWER_SUPPLY_PROP_CHARGE_NOW,
448 POWER_SUPPLY_PROP_TEMP,
449 POWER_SUPPLY_PROP_MANUFACTURER,
450 POWER_SUPPLY_PROP_SERIAL_NUMBER,
451 POWER_SUPPLY_PROP_CHARGE_COUNTER,
452};
453
351/* EEPROM reading goes completely around the power_supply API, sadly */ 454/* EEPROM reading goes completely around the power_supply API, sadly */
352 455
353#define EEPROM_START 0x20 456#define EEPROM_START 0x20
@@ -419,8 +522,6 @@ static struct device_attribute olpc_bat_error = {
419static struct platform_device *bat_pdev; 522static struct platform_device *bat_pdev;
420 523
421static struct power_supply olpc_bat = { 524static struct power_supply olpc_bat = {
422 .properties = olpc_bat_props,
423 .num_properties = ARRAY_SIZE(olpc_bat_props),
424 .get_property = olpc_bat_get_property, 525 .get_property = olpc_bat_get_property,
425 .use_for_apm = 1, 526 .use_for_apm = 1,
426}; 527};
@@ -466,6 +567,13 @@ static int __init olpc_bat_init(void)
466 goto ac_failed; 567 goto ac_failed;
467 568
468 olpc_bat.name = bat_pdev->name; 569 olpc_bat.name = bat_pdev->name;
570 if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
571 olpc_bat.properties = olpc_xo15_bat_props;
572 olpc_bat.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
573 } else { /* XO-1 */
574 olpc_bat.properties = olpc_xo1_bat_props;
575 olpc_bat.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
576 }
469 577
470 ret = power_supply_register(&bat_pdev->dev, &olpc_bat); 578 ret = power_supply_register(&bat_pdev->dev, &olpc_bat);
471 if (ret) 579 if (ret)
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 91606bb55318..970f7335d3a7 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -190,10 +190,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
190 goto success; 190 goto success;
191 191
192create_triggers_failed: 192create_triggers_failed:
193 device_unregister(psy->dev); 193 device_del(dev);
194kobject_set_name_failed: 194kobject_set_name_failed:
195device_add_failed: 195device_add_failed:
196 kfree(dev); 196 put_device(dev);
197success: 197success:
198 return rc; 198 return rc;
199} 199}
@@ -201,7 +201,7 @@ EXPORT_SYMBOL_GPL(power_supply_register);
201 201
202void power_supply_unregister(struct power_supply *psy) 202void power_supply_unregister(struct power_supply *psy)
203{ 203{
204 flush_scheduled_work(); 204 cancel_work_sync(&psy->changed_work);
205 power_supply_remove_triggers(psy); 205 power_supply_remove_triggers(psy);
206 device_unregister(psy->dev); 206 device_unregister(psy->dev);
207} 207}
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index 4a8ae3935b3b..4255f2358b13 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -112,6 +112,13 @@ static int calc_full_volt(int volt_val, int cur_val, int impedance)
112 return volt_val + cur_val * impedance / 1000; 112 return volt_val + cur_val * impedance / 1000;
113} 113}
114 114
115static int charge_finished(struct s3c_adc_bat *bat)
116{
117 return bat->pdata->gpio_inverted ?
118 !gpio_get_value(bat->pdata->gpio_charge_finished) :
119 gpio_get_value(bat->pdata->gpio_charge_finished);
120}
121
115static int s3c_adc_bat_get_property(struct power_supply *psy, 122static int s3c_adc_bat_get_property(struct power_supply *psy,
116 enum power_supply_property psp, 123 enum power_supply_property psp,
117 union power_supply_propval *val) 124 union power_supply_propval *val)
@@ -140,7 +147,7 @@ static int s3c_adc_bat_get_property(struct power_supply *psy,
140 147
141 if (bat->cable_plugged && 148 if (bat->cable_plugged &&
142 ((bat->pdata->gpio_charge_finished < 0) || 149 ((bat->pdata->gpio_charge_finished < 0) ||
143 !gpio_get_value(bat->pdata->gpio_charge_finished))) { 150 !charge_finished(bat))) {
144 lut = bat->pdata->lut_acin; 151 lut = bat->pdata->lut_acin;
145 lut_size = bat->pdata->lut_acin_cnt; 152 lut_size = bat->pdata->lut_acin_cnt;
146 } 153 }
@@ -236,8 +243,7 @@ static void s3c_adc_bat_work(struct work_struct *work)
236 } 243 }
237 } else { 244 } else {
238 if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) { 245 if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) {
239 is_charged = gpio_get_value( 246 is_charged = charge_finished(&main_bat);
240 main_bat.pdata->gpio_charge_finished);
241 if (is_charged) { 247 if (is_charged) {
242 if (bat->pdata->disable_charger) 248 if (bat->pdata->disable_charger)
243 bat->pdata->disable_charger(); 249 bat->pdata->disable_charger();
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index ee04936b2db5..53f0d3524fcd 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -332,7 +332,7 @@ static struct {
332static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state) 332static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
333{ 333{
334 /* flush all pending status updates */ 334 /* flush all pending status updates */
335 flush_scheduled_work(); 335 flush_work_sync(&bat_work);
336 return 0; 336 return 0;
337} 337}
338 338
@@ -422,7 +422,7 @@ err_psy_reg_jacket:
422err_psy_reg_main: 422err_psy_reg_main:
423 423
424 /* see comment in tosa_bat_remove */ 424 /* see comment in tosa_bat_remove */
425 flush_scheduled_work(); 425 cancel_work_sync(&bat_work);
426 426
427 i--; 427 i--;
428err_gpio: 428err_gpio:
@@ -445,12 +445,11 @@ static int __devexit tosa_bat_remove(struct platform_device *dev)
445 power_supply_unregister(&tosa_bat_main.psy); 445 power_supply_unregister(&tosa_bat_main.psy);
446 446
447 /* 447 /*
448 * now flush all pending work. 448 * Now cancel the bat_work. We won't get any more schedules,
449 * we won't get any more schedules, since all 449 * since all sources (isr and external_power_changed) are
450 * sources (isr and external_power_changed) 450 * unregistered now.
451 * are unregistered now.
452 */ 451 */
453 flush_scheduled_work(); 452 cancel_work_sync(&bat_work);
454 453
455 for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--) 454 for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
456 gpio_free(gpios[i].gpio); 455 gpio_free(gpios[i].gpio);
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 5071d85ec12d..156559e56fa5 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -147,7 +147,7 @@ static irqreturn_t wm97xx_chrg_irq(int irq, void *data)
147#ifdef CONFIG_PM 147#ifdef CONFIG_PM
148static int wm97xx_bat_suspend(struct device *dev) 148static int wm97xx_bat_suspend(struct device *dev)
149{ 149{
150 flush_scheduled_work(); 150 flush_work_sync(&bat_work);
151 return 0; 151 return 0;
152} 152}
153 153
@@ -273,7 +273,7 @@ static int __devexit wm97xx_bat_remove(struct platform_device *dev)
273 free_irq(gpio_to_irq(pdata->charge_gpio), dev); 273 free_irq(gpio_to_irq(pdata->charge_gpio), dev);
274 gpio_free(pdata->charge_gpio); 274 gpio_free(pdata->charge_gpio);
275 } 275 }
276 flush_scheduled_work(); 276 cancel_work_sync(&bat_work);
277 power_supply_unregister(&bat_ps); 277 power_supply_unregister(&bat_ps);
278 kfree(prop); 278 kfree(prop);
279 return 0; 279 return 0;
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 85064a9f649e..e5ed52d71937 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -254,7 +254,7 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
254 struct z2_charger *charger = i2c_get_clientdata(client); 254 struct z2_charger *charger = i2c_get_clientdata(client);
255 struct z2_battery_info *info = charger->info; 255 struct z2_battery_info *info = charger->info;
256 256
257 flush_scheduled_work(); 257 cancel_work_sync(&charger->bat_work);
258 power_supply_unregister(&charger->batt_ps); 258 power_supply_unregister(&charger->batt_ps);
259 259
260 kfree(charger->batt_ps.properties); 260 kfree(charger->batt_ps.properties);
@@ -271,7 +271,9 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
271#ifdef CONFIG_PM 271#ifdef CONFIG_PM
272static int z2_batt_suspend(struct i2c_client *client, pm_message_t state) 272static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
273{ 273{
274 flush_scheduled_work(); 274 struct z2_charger *charger = i2c_get_clientdata(client);
275
276 flush_work_sync(&charger->bat_work);
275 return 0; 277 return 0;
276} 278}
277 279
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index 2728469d3884..82583b0ff82d 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -46,8 +46,6 @@ static void pps_ktimer_event(unsigned long ptr)
46 /* First of all we get the time stamp... */ 46 /* First of all we get the time stamp... */
47 pps_get_ts(&ts); 47 pps_get_ts(&ts);
48 48
49 dev_info(pps->dev, "PPS event at %lu\n", jiffies);
50
51 pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL); 49 pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);
52 50
53 mod_timer(&ktimer, jiffies + HZ); 51 mod_timer(&ktimer, jiffies + HZ);
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 32221efd9ca9..c571d6dd8f61 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -163,7 +163,7 @@ static void parport_attach(struct parport *port)
163 } 163 }
164 164
165 device->pardev = parport_register_device(port, KBUILD_MODNAME, 165 device->pardev = parport_register_device(port, KBUILD_MODNAME,
166 NULL, NULL, parport_irq, 0, device); 166 NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device);
167 if (!device->pardev) { 167 if (!device->pardev) {
168 pr_err("couldn't register with %s\n", port->name); 168 pr_err("couldn't register with %s\n", port->name);
169 goto err_free; 169 goto err_free;
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index 5c32f8dacf56..b93af3ebb5ba 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -198,7 +198,7 @@ static void parport_attach(struct parport *port)
198 } 198 }
199 199
200 device.pardev = parport_register_device(port, KBUILD_MODNAME, 200 device.pardev = parport_register_device(port, KBUILD_MODNAME,
201 NULL, NULL, NULL, 0, &device); 201 NULL, NULL, NULL, PARPORT_FLAG_EXCL, &device);
202 if (!device.pardev) { 202 if (!device.pardev) {
203 pr_err("couldn't register with %s\n", port->name); 203 pr_err("couldn't register with %s\n", port->name);
204 return; 204 return;
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 467e82bd0929..a50391b6ba2a 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -943,6 +943,8 @@ static int rio_enum_complete(struct rio_mport *port)
943 * @port: Master port to send transactions 943 * @port: Master port to send transactions
944 * @destid: Current destination ID in network 944 * @destid: Current destination ID in network
945 * @hopcount: Number of hops into the network 945 * @hopcount: Number of hops into the network
946 * @prev: previous rio_dev
947 * @prev_port: previous port number
946 * 948 *
947 * Recursively discovers a RIO network. Transactions are sent via the 949 * Recursively discovers a RIO network. Transactions are sent via the
948 * master port passed in @port. 950 * master port passed in @port.
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 7568df6122ab..0ec49ca527a8 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -424,6 +424,9 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
424 } 424 }
425 } 425 }
426 426
427 if (pdata->buck_voltage_lock)
428 return -EINVAL;
429
427 /* no predefine regulator found */ 430 /* no predefine regulator found */
428 max8998->buck1_idx = (buck1_last_val % 2) + 2; 431 max8998->buck1_idx = (buck1_last_val % 2) + 2;
429 dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n", 432 dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n",
@@ -451,18 +454,26 @@ buck1_exit:
451 "BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n" 454 "BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n"
452 , i, max8998->buck2_vol[0], max8998->buck2_vol[1]); 455 , i, max8998->buck2_vol[0], max8998->buck2_vol[1]);
453 if (gpio_is_valid(pdata->buck2_set3)) { 456 if (gpio_is_valid(pdata->buck2_set3)) {
454 if (max8998->buck2_vol[0] == i) { 457
455 max8998->buck1_idx = 0; 458 /* check if requested voltage */
456 buck2_gpio_set(pdata->buck2_set3, 0); 459 /* value is already defined */
457 } else { 460 for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) {
458 max8998->buck1_idx = 1; 461 if (max8998->buck2_vol[j] == i) {
459 ret = max8998_get_voltage_register(rdev, &reg, 462 max8998->buck2_idx = j;
460 &shift, 463 buck2_gpio_set(pdata->buck2_set3, j);
461 &mask); 464 goto buck2_exit;
462 ret = max8998_write_reg(i2c, reg, i); 465 }
463 max8998->buck2_vol[1] = i;
464 buck2_gpio_set(pdata->buck2_set3, 1);
465 } 466 }
467
468 if (pdata->buck_voltage_lock)
469 return -EINVAL;
470
471 max8998_get_voltage_register(rdev,
472 &reg, &shift, &mask);
473 ret = max8998_write_reg(i2c, reg, i);
474 max8998->buck2_vol[max8998->buck2_idx] = i;
475 buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx);
476buck2_exit:
466 dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name, 477 dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
467 gpio_get_value(pdata->buck2_set3)); 478 gpio_get_value(pdata->buck2_set3));
468 } else { 479 } else {
@@ -707,6 +718,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
707 platform_set_drvdata(pdev, max8998); 718 platform_set_drvdata(pdev, max8998);
708 i2c = max8998->iodev->i2c; 719 i2c = max8998->iodev->i2c;
709 720
721 max8998->buck1_idx = pdata->buck1_default_idx;
722 max8998->buck2_idx = pdata->buck2_default_idx;
723
710 /* NOTE: */ 724 /* NOTE: */
711 /* For unused GPIO NOT marked as -1 (thereof equal to 0) WARN_ON */ 725 /* For unused GPIO NOT marked as -1 (thereof equal to 0) WARN_ON */
712 /* will be displayed */ 726 /* will be displayed */
@@ -739,23 +753,46 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
739 i = 0; 753 i = 0;
740 while (buck12_voltage_map_desc.min + 754 while (buck12_voltage_map_desc.min +
741 buck12_voltage_map_desc.step*i 755 buck12_voltage_map_desc.step*i
742 != (pdata->buck1_max_voltage1 / 1000)) 756 < (pdata->buck1_voltage1 / 1000))
743 i++; 757 i++;
744 printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
745 max8998->buck1_vol[0] = i; 758 max8998->buck1_vol[0] = i;
746 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i); 759 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
760 if (ret)
761 return ret;
747 762
748 /* Set predefined value for BUCK1 register 2 */ 763 /* Set predefined value for BUCK1 register 2 */
749 i = 0; 764 i = 0;
750 while (buck12_voltage_map_desc.min + 765 while (buck12_voltage_map_desc.min +
751 buck12_voltage_map_desc.step*i 766 buck12_voltage_map_desc.step*i
752 != (pdata->buck1_max_voltage2 / 1000)) 767 < (pdata->buck1_voltage2 / 1000))
753 i++; 768 i++;
754 769
755 max8998->buck1_vol[1] = i; 770 max8998->buck1_vol[1] = i;
756 printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx); 771 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
757 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i) 772 if (ret)
758 + ret; 773 return ret;
774
775 /* Set predefined value for BUCK1 register 3 */
776 i = 0;
777 while (buck12_voltage_map_desc.min +
778 buck12_voltage_map_desc.step*i
779 < (pdata->buck1_voltage3 / 1000))
780 i++;
781
782 max8998->buck1_vol[2] = i;
783 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
784 if (ret)
785 return ret;
786
787 /* Set predefined value for BUCK1 register 4 */
788 i = 0;
789 while (buck12_voltage_map_desc.min +
790 buck12_voltage_map_desc.step*i
791 < (pdata->buck1_voltage4 / 1000))
792 i++;
793
794 max8998->buck1_vol[3] = i;
795 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
759 if (ret) 796 if (ret)
760 return ret; 797 return ret;
761 798
@@ -772,18 +809,28 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
772 gpio_direction_output(pdata->buck2_set3, 809 gpio_direction_output(pdata->buck2_set3,
773 max8998->buck2_idx & 0x1); 810 max8998->buck2_idx & 0x1);
774 811
775 /* BUCK2 - set preset default voltage value to buck2_vol[0] */ 812 /* BUCK2 register 1 */
776 i = 0; 813 i = 0;
777 while (buck12_voltage_map_desc.min + 814 while (buck12_voltage_map_desc.min +
778 buck12_voltage_map_desc.step*i 815 buck12_voltage_map_desc.step*i
779 != (pdata->buck2_max_voltage / 1000)) 816 < (pdata->buck2_voltage1 / 1000))
780 i++; 817 i++;
781 printk(KERN_ERR "i:%d, buck2_idx:%d\n", i, max8998->buck2_idx);
782 max8998->buck2_vol[0] = i; 818 max8998->buck2_vol[0] = i;
783 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i); 819 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
784 if (ret) 820 if (ret)
785 return ret; 821 return ret;
786 822
823 /* BUCK2 register 2 */
824 i = 0;
825 while (buck12_voltage_map_desc.min +
826 buck12_voltage_map_desc.step*i
827 < (pdata->buck2_voltage2 / 1000))
828 i++;
829 printk(KERN_ERR "i2:%d, buck2_idx:%d\n", i, max8998->buck2_idx);
830 max8998->buck2_vol[1] = i;
831 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
832 if (ret)
833 return ret;
787 } 834 }
788 835
789 for (i = 0; i < pdata->num_regulators; i++) { 836 for (i = 0; i < pdata->num_regulators; i++) {
@@ -835,6 +882,12 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev)
835 return 0; 882 return 0;
836} 883}
837 884
885static const struct platform_device_id max8998_pmic_id[] = {
886 { "max8998-pmic", TYPE_MAX8998 },
887 { "lp3974-pmic", TYPE_LP3974 },
888 { }
889};
890
838static struct platform_driver max8998_pmic_driver = { 891static struct platform_driver max8998_pmic_driver = {
839 .driver = { 892 .driver = {
840 .name = "max8998-pmic", 893 .name = "max8998-pmic",
@@ -842,6 +895,7 @@ static struct platform_driver max8998_pmic_driver = {
842 }, 895 },
843 .probe = max8998_pmic_probe, 896 .probe = max8998_pmic_probe,
844 .remove = __devexit_p(max8998_pmic_remove), 897 .remove = __devexit_p(max8998_pmic_remove),
898 .id_table = max8998_pmic_id,
845}; 899};
846 900
847static int __init max8998_pmic_init(void) 901static int __init max8998_pmic_init(void)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4941cade319f..cdd97192dc69 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -97,18 +97,6 @@ config RTC_INTF_DEV
97 97
98 If unsure, say Y. 98 If unsure, say Y.
99 99
100config RTC_INTF_DEV_UIE_EMUL
101 bool "RTC UIE emulation on dev interface"
102 depends on RTC_INTF_DEV
103 help
104 Provides an emulation for RTC_UIE if the underlying rtc chip
105 driver does not expose RTC_UIE ioctls. Those requests generate
106 once-per-second update interrupts, used for synchronization.
107
108 The emulation code will read the time from the hardware
109 clock several times per second, please enable this option
110 only if you know that you really need it.
111
112config RTC_DRV_TEST 100config RTC_DRV_TEST
113 tristate "Test driver/device" 101 tristate "Test driver/device"
114 help 102 help
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 9583cbcc6b79..c404b61386bf 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -143,6 +143,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
143 rtc->id = id; 143 rtc->id = id;
144 rtc->ops = ops; 144 rtc->ops = ops;
145 rtc->owner = owner; 145 rtc->owner = owner;
146 rtc->irq_freq = 1;
146 rtc->max_user_freq = 64; 147 rtc->max_user_freq = 64;
147 rtc->dev.parent = dev; 148 rtc->dev.parent = dev;
148 rtc->dev.class = rtc_class; 149 rtc->dev.class = rtc_class;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 90384b9f6b2c..a0c01967244d 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -16,6 +16,9 @@
16#include <linux/log2.h> 16#include <linux/log2.h>
17#include <linux/workqueue.h> 17#include <linux/workqueue.h>
18 18
19static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
20static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
21
19static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) 22static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
20{ 23{
21 int err; 24 int err;
@@ -120,12 +123,18 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
120 err = mutex_lock_interruptible(&rtc->ops_lock); 123 err = mutex_lock_interruptible(&rtc->ops_lock);
121 if (err) 124 if (err)
122 return err; 125 return err;
123 alarm->enabled = rtc->aie_timer.enabled; 126 if (rtc->ops == NULL)
124 if (alarm->enabled) 127 err = -ENODEV;
128 else if (!rtc->ops->read_alarm)
129 err = -EINVAL;
130 else {
131 memset(alarm, 0, sizeof(struct rtc_wkalrm));
132 alarm->enabled = rtc->aie_timer.enabled;
125 alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires); 133 alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
134 }
126 mutex_unlock(&rtc->ops_lock); 135 mutex_unlock(&rtc->ops_lock);
127 136
128 return 0; 137 return err;
129} 138}
130EXPORT_SYMBOL_GPL(rtc_read_alarm); 139EXPORT_SYMBOL_GPL(rtc_read_alarm);
131 140
@@ -175,16 +184,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
175 return err; 184 return err;
176 if (rtc->aie_timer.enabled) { 185 if (rtc->aie_timer.enabled) {
177 rtc_timer_remove(rtc, &rtc->aie_timer); 186 rtc_timer_remove(rtc, &rtc->aie_timer);
178 rtc->aie_timer.enabled = 0;
179 } 187 }
180 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); 188 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
181 rtc->aie_timer.period = ktime_set(0, 0); 189 rtc->aie_timer.period = ktime_set(0, 0);
182 if (alarm->enabled) { 190 if (alarm->enabled) {
183 rtc->aie_timer.enabled = 1; 191 err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
184 rtc_timer_enqueue(rtc, &rtc->aie_timer);
185 } 192 }
186 mutex_unlock(&rtc->ops_lock); 193 mutex_unlock(&rtc->ops_lock);
187 return 0; 194 return err;
188} 195}
189EXPORT_SYMBOL_GPL(rtc_set_alarm); 196EXPORT_SYMBOL_GPL(rtc_set_alarm);
190 197
@@ -195,15 +202,15 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
195 return err; 202 return err;
196 203
197 if (rtc->aie_timer.enabled != enabled) { 204 if (rtc->aie_timer.enabled != enabled) {
198 if (enabled) { 205 if (enabled)
199 rtc->aie_timer.enabled = 1; 206 err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
200 rtc_timer_enqueue(rtc, &rtc->aie_timer); 207 else
201 } else {
202 rtc_timer_remove(rtc, &rtc->aie_timer); 208 rtc_timer_remove(rtc, &rtc->aie_timer);
203 rtc->aie_timer.enabled = 0;
204 }
205 } 209 }
206 210
211 if (err)
212 return err;
213
207 if (!rtc->ops) 214 if (!rtc->ops)
208 err = -ENODEV; 215 err = -ENODEV;
209 else if (!rtc->ops->alarm_irq_enable) 216 else if (!rtc->ops->alarm_irq_enable)
@@ -235,12 +242,9 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
235 now = rtc_tm_to_ktime(tm); 242 now = rtc_tm_to_ktime(tm);
236 rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); 243 rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
237 rtc->uie_rtctimer.period = ktime_set(1, 0); 244 rtc->uie_rtctimer.period = ktime_set(1, 0);
238 rtc->uie_rtctimer.enabled = 1; 245 err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
239 rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); 246 } else
240 } else {
241 rtc_timer_remove(rtc, &rtc->uie_rtctimer); 247 rtc_timer_remove(rtc, &rtc->uie_rtctimer);
242 rtc->uie_rtctimer.enabled = 0;
243 }
244 248
245out: 249out:
246 mutex_unlock(&rtc->ops_lock); 250 mutex_unlock(&rtc->ops_lock);
@@ -460,6 +464,9 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
460 int err = 0; 464 int err = 0;
461 unsigned long flags; 465 unsigned long flags;
462 466
467 if (freq <= 0)
468 return -EINVAL;
469
463 spin_lock_irqsave(&rtc->irq_task_lock, flags); 470 spin_lock_irqsave(&rtc->irq_task_lock, flags);
464 if (rtc->irq_task != NULL && task == NULL) 471 if (rtc->irq_task != NULL && task == NULL)
465 err = -EBUSY; 472 err = -EBUSY;
@@ -488,10 +495,13 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
488 * Enqueues a timer onto the rtc devices timerqueue and sets 495 * Enqueues a timer onto the rtc devices timerqueue and sets
489 * the next alarm event appropriately. 496 * the next alarm event appropriately.
490 * 497 *
498 * Sets the enabled bit on the added timer.
499 *
491 * Must hold ops_lock for proper serialization of timerqueue 500 * Must hold ops_lock for proper serialization of timerqueue
492 */ 501 */
493void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) 502static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
494{ 503{
504 timer->enabled = 1;
495 timerqueue_add(&rtc->timerqueue, &timer->node); 505 timerqueue_add(&rtc->timerqueue, &timer->node);
496 if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) { 506 if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
497 struct rtc_wkalrm alarm; 507 struct rtc_wkalrm alarm;
@@ -501,7 +511,13 @@ void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
501 err = __rtc_set_alarm(rtc, &alarm); 511 err = __rtc_set_alarm(rtc, &alarm);
502 if (err == -ETIME) 512 if (err == -ETIME)
503 schedule_work(&rtc->irqwork); 513 schedule_work(&rtc->irqwork);
514 else if (err) {
515 timerqueue_del(&rtc->timerqueue, &timer->node);
516 timer->enabled = 0;
517 return err;
518 }
504 } 519 }
520 return 0;
505} 521}
506 522
507/** 523/**
@@ -512,13 +528,15 @@ void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
512 * Removes a timer onto the rtc devices timerqueue and sets 528 * Removes a timer onto the rtc devices timerqueue and sets
513 * the next alarm event appropriately. 529 * the next alarm event appropriately.
514 * 530 *
531 * Clears the enabled bit on the removed timer.
532 *
515 * Must hold ops_lock for proper serialization of timerqueue 533 * Must hold ops_lock for proper serialization of timerqueue
516 */ 534 */
517void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) 535static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
518{ 536{
519 struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); 537 struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
520 timerqueue_del(&rtc->timerqueue, &timer->node); 538 timerqueue_del(&rtc->timerqueue, &timer->node);
521 539 timer->enabled = 0;
522 if (next == &timer->node) { 540 if (next == &timer->node) {
523 struct rtc_wkalrm alarm; 541 struct rtc_wkalrm alarm;
524 int err; 542 int err;
@@ -626,8 +644,7 @@ int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
626 timer->node.expires = expires; 644 timer->node.expires = expires;
627 timer->period = period; 645 timer->period = period;
628 646
629 timer->enabled = 1; 647 ret = rtc_timer_enqueue(rtc, timer);
630 rtc_timer_enqueue(rtc, timer);
631 648
632 mutex_unlock(&rtc->ops_lock); 649 mutex_unlock(&rtc->ops_lock);
633 return ret; 650 return ret;
@@ -645,7 +662,6 @@ int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
645 mutex_lock(&rtc->ops_lock); 662 mutex_lock(&rtc->ops_lock);
646 if (timer->enabled) 663 if (timer->enabled)
647 rtc_timer_remove(rtc, timer); 664 rtc_timer_remove(rtc, timer);
648 timer->enabled = 0;
649 mutex_unlock(&rtc->ops_lock); 665 mutex_unlock(&rtc->ops_lock);
650 return ret; 666 return ret;
651} 667}
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index f22dee35f330..3f7bc6b9fefa 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -20,6 +20,7 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/mfd/max8998.h> 21#include <linux/mfd/max8998.h>
22#include <linux/mfd/max8998-private.h> 22#include <linux/mfd/max8998-private.h>
23#include <linux/delay.h>
23 24
24#define MAX8998_RTC_SEC 0x00 25#define MAX8998_RTC_SEC 0x00
25#define MAX8998_RTC_MIN 0x01 26#define MAX8998_RTC_MIN 0x01
@@ -73,6 +74,7 @@ struct max8998_rtc_info {
73 struct i2c_client *rtc; 74 struct i2c_client *rtc;
74 struct rtc_device *rtc_dev; 75 struct rtc_device *rtc_dev;
75 int irq; 76 int irq;
77 bool lp3974_bug_workaround;
76}; 78};
77 79
78static void max8998_data_to_tm(u8 *data, struct rtc_time *tm) 80static void max8998_data_to_tm(u8 *data, struct rtc_time *tm)
@@ -124,10 +126,16 @@ static int max8998_rtc_set_time(struct device *dev, struct rtc_time *tm)
124{ 126{
125 struct max8998_rtc_info *info = dev_get_drvdata(dev); 127 struct max8998_rtc_info *info = dev_get_drvdata(dev);
126 u8 data[8]; 128 u8 data[8];
129 int ret;
127 130
128 max8998_tm_to_data(tm, data); 131 max8998_tm_to_data(tm, data);
129 132
130 return max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data); 133 ret = max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
134
135 if (info->lp3974_bug_workaround)
136 msleep(2000);
137
138 return ret;
131} 139}
132 140
133static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) 141static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -163,12 +171,29 @@ static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
163 171
164static int max8998_rtc_stop_alarm(struct max8998_rtc_info *info) 172static int max8998_rtc_stop_alarm(struct max8998_rtc_info *info)
165{ 173{
166 return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0); 174 int ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
175
176 if (info->lp3974_bug_workaround)
177 msleep(2000);
178
179 return ret;
167} 180}
168 181
169static int max8998_rtc_start_alarm(struct max8998_rtc_info *info) 182static int max8998_rtc_start_alarm(struct max8998_rtc_info *info)
170{ 183{
171 return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0x77); 184 int ret;
185 u8 alarm0_conf = 0x77;
186
187 /* LP3974 with delay bug chips has rtc alarm bugs with "MONTH" field */
188 if (info->lp3974_bug_workaround)
189 alarm0_conf = 0x57;
190
191 ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, alarm0_conf);
192
193 if (info->lp3974_bug_workaround)
194 msleep(2000);
195
196 return ret;
172} 197}
173 198
174static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) 199static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -187,10 +212,13 @@ static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
187 if (ret < 0) 212 if (ret < 0)
188 return ret; 213 return ret;
189 214
215 if (info->lp3974_bug_workaround)
216 msleep(2000);
217
190 if (alrm->enabled) 218 if (alrm->enabled)
191 return max8998_rtc_start_alarm(info); 219 ret = max8998_rtc_start_alarm(info);
192 220
193 return 0; 221 return ret;
194} 222}
195 223
196static int max8998_rtc_alarm_irq_enable(struct device *dev, 224static int max8998_rtc_alarm_irq_enable(struct device *dev,
@@ -224,6 +252,7 @@ static const struct rtc_class_ops max8998_rtc_ops = {
224static int __devinit max8998_rtc_probe(struct platform_device *pdev) 252static int __devinit max8998_rtc_probe(struct platform_device *pdev)
225{ 253{
226 struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent); 254 struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent);
255 struct max8998_platform_data *pdata = dev_get_platdata(max8998->dev);
227 struct max8998_rtc_info *info; 256 struct max8998_rtc_info *info;
228 int ret; 257 int ret;
229 258
@@ -249,10 +278,18 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
249 278
250 ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, 279 ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0,
251 "rtc-alarm0", info); 280 "rtc-alarm0", info);
281
252 if (ret < 0) 282 if (ret < 0)
253 dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n", 283 dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
254 info->irq, ret); 284 info->irq, ret);
255 285
286 dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
287 if (pdata->rtc_delay) {
288 info->lp3974_bug_workaround = true;
289 dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
290 " RTC updates will be extremely slow.\n");
291 }
292
256 return 0; 293 return 0;
257 294
258out_rtc: 295out_rtc:
@@ -273,6 +310,12 @@ static int __devexit max8998_rtc_remove(struct platform_device *pdev)
273 return 0; 310 return 0;
274} 311}
275 312
313static const struct platform_device_id max8998_rtc_id[] = {
314 { "max8998-rtc", TYPE_MAX8998 },
315 { "lp3974-rtc", TYPE_LP3974 },
316 { }
317};
318
276static struct platform_driver max8998_rtc_driver = { 319static struct platform_driver max8998_rtc_driver = {
277 .driver = { 320 .driver = {
278 .name = "max8998-rtc", 321 .name = "max8998-rtc",
@@ -280,6 +323,7 @@ static struct platform_driver max8998_rtc_driver = {
280 }, 323 },
281 .probe = max8998_rtc_probe, 324 .probe = max8998_rtc_probe,
282 .remove = __devexit_p(max8998_rtc_remove), 325 .remove = __devexit_p(max8998_rtc_remove),
326 .id_table = max8998_rtc_id,
283}; 327};
284 328
285static int __init max8998_rtc_init(void) 329static int __init max8998_rtc_init(void)
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index c086fc30a84c..242bbf86c74a 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -81,12 +81,16 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
81 81
82static int rtc_proc_open(struct inode *inode, struct file *file) 82static int rtc_proc_open(struct inode *inode, struct file *file)
83{ 83{
84 int ret;
84 struct rtc_device *rtc = PDE(inode)->data; 85 struct rtc_device *rtc = PDE(inode)->data;
85 86
86 if (!try_module_get(THIS_MODULE)) 87 if (!try_module_get(THIS_MODULE))
87 return -ENODEV; 88 return -ENODEV;
88 89
89 return single_open(file, rtc_proc_show, rtc); 90 ret = single_open(file, rtc_proc_show, rtc);
91 if (ret)
92 module_put(THIS_MODULE);
93 return ret;
90} 94}
91 95
92static int rtc_proc_release(struct inode *inode, struct file *file) 96static int rtc_proc_release(struct inode *inode, struct file *file)
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 4155805dcdff..2b771f18d1ad 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -319,6 +319,9 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
319 319
320 private = (struct dasd_eckd_private *) device->private; 320 private = (struct dasd_eckd_private *) device->private;
321 lcu = private->lcu; 321 lcu = private->lcu;
322 /* nothing to do if already disconnected */
323 if (!lcu)
324 return;
322 device->discipline->get_uid(device, &uid); 325 device->discipline->get_uid(device, &uid);
323 spin_lock_irqsave(&lcu->lock, flags); 326 spin_lock_irqsave(&lcu->lock, flags);
324 list_del_init(&device->alias_list); 327 list_del_init(&device->alias_list);
@@ -680,6 +683,9 @@ int dasd_alias_remove_device(struct dasd_device *device)
680 683
681 private = (struct dasd_eckd_private *) device->private; 684 private = (struct dasd_eckd_private *) device->private;
682 lcu = private->lcu; 685 lcu = private->lcu;
686 /* nothing to do if already removed */
687 if (!lcu)
688 return 0;
683 spin_lock_irqsave(&lcu->lock, flags); 689 spin_lock_irqsave(&lcu->lock, flags);
684 _remove_device_from_lcu(lcu, device); 690 _remove_device_from_lcu(lcu, device);
685 spin_unlock_irqrestore(&lcu->lock, flags); 691 spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e8391b89eff4..b7eaff9ca19e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1835,6 +1835,7 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
1835 * available again. Kick re-detection. 1835 * available again. Kick re-detection.
1836 */ 1836 */
1837 cdev->private->flags.resuming = 1; 1837 cdev->private->flags.resuming = 1;
1838 cdev->private->path_new_mask = LPM_ANYPATH;
1838 css_schedule_eval(sch->schid); 1839 css_schedule_eval(sch->schid);
1839 spin_unlock_irq(sch->lock); 1840 spin_unlock_irq(sch->lock);
1840 css_complete_work(); 1841 css_complete_work();
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e9fff2b9bce2..5640c89cd9de 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -476,7 +476,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
476static int get_inbound_buffer_frontier(struct qdio_q *q) 476static int get_inbound_buffer_frontier(struct qdio_q *q)
477{ 477{
478 int count, stop; 478 int count, stop;
479 unsigned char state; 479 unsigned char state = 0;
480 480
481 /* 481 /*
482 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 482 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -643,7 +643,7 @@ void qdio_inbound_processing(unsigned long data)
643static int get_outbound_buffer_frontier(struct qdio_q *q) 643static int get_outbound_buffer_frontier(struct qdio_q *q)
644{ 644{
645 int count, stop; 645 int count, stop;
646 unsigned char state; 646 unsigned char state = 0;
647 647
648 if (need_siga_sync(q)) 648 if (need_siga_sync(q))
649 if (((queue_type(q) != QDIO_IQDIO_QFMT) && 649 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 65ebee0a3266..b6a6356d09b3 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -565,7 +565,7 @@ static int netiucv_callback_connreq(struct iucv_path *path,
565 struct iucv_event ev; 565 struct iucv_event ev;
566 int rc; 566 int rc;
567 567
568 if (memcmp(iucvMagic, ipuser, sizeof(ipuser))) 568 if (memcmp(iucvMagic, ipuser, 16))
569 /* ipuser must match iucvMagic. */ 569 /* ipuser must match iucvMagic. */
570 return -EINVAL; 570 return -EINVAL;
571 rc = -EINVAL; 571 rc = -EINVAL;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 29f848bfc12f..019ae58ab913 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -988,16 +988,30 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
988 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); 988 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
989 if (chp_dsc != NULL) { 989 if (chp_dsc != NULL) {
990 /* CHPP field bit 6 == 1 -> single queue */ 990 /* CHPP field bit 6 == 1 -> single queue */
991 if ((chp_dsc->chpp & 0x02) == 0x02) 991 if ((chp_dsc->chpp & 0x02) == 0x02) {
992 if ((atomic_read(&card->qdio.state) !=
993 QETH_QDIO_UNINITIALIZED) &&
994 (card->qdio.no_out_queues == 4))
995 /* change from 4 to 1 outbound queues */
996 qeth_free_qdio_buffers(card);
992 card->qdio.no_out_queues = 1; 997 card->qdio.no_out_queues = 1;
998 if (card->qdio.default_out_queue != 0)
999 dev_info(&card->gdev->dev,
1000 "Priority Queueing not supported\n");
1001 card->qdio.default_out_queue = 0;
1002 } else {
1003 if ((atomic_read(&card->qdio.state) !=
1004 QETH_QDIO_UNINITIALIZED) &&
1005 (card->qdio.no_out_queues == 1)) {
1006 /* change from 1 to 4 outbound queues */
1007 qeth_free_qdio_buffers(card);
1008 card->qdio.default_out_queue = 2;
1009 }
1010 card->qdio.no_out_queues = 4;
1011 }
993 card->info.func_level = 0x4100 + chp_dsc->desc; 1012 card->info.func_level = 0x4100 + chp_dsc->desc;
994 kfree(chp_dsc); 1013 kfree(chp_dsc);
995 } 1014 }
996 if (card->qdio.no_out_queues == 1) {
997 card->qdio.default_out_queue = 0;
998 dev_info(&card->gdev->dev,
999 "Priority Queueing not supported\n");
1000 }
1001 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); 1015 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1002 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); 1016 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1003 return; 1017 return;
@@ -1832,33 +1846,6 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
1832 } 1846 }
1833} 1847}
1834 1848
1835static inline int qeth_get_max_mtu_for_card(int cardtype)
1836{
1837 switch (cardtype) {
1838
1839 case QETH_CARD_TYPE_UNKNOWN:
1840 case QETH_CARD_TYPE_OSD:
1841 case QETH_CARD_TYPE_OSN:
1842 case QETH_CARD_TYPE_OSM:
1843 case QETH_CARD_TYPE_OSX:
1844 return 61440;
1845 case QETH_CARD_TYPE_IQD:
1846 return 57344;
1847 default:
1848 return 1500;
1849 }
1850}
1851
1852static inline int qeth_get_mtu_out_of_mpc(int cardtype)
1853{
1854 switch (cardtype) {
1855 case QETH_CARD_TYPE_IQD:
1856 return 1;
1857 default:
1858 return 0;
1859 }
1860}
1861
1862static inline int qeth_get_mtu_outof_framesize(int framesize) 1849static inline int qeth_get_mtu_outof_framesize(int framesize)
1863{ 1850{
1864 switch (framesize) { 1851 switch (framesize) {
@@ -1881,10 +1868,9 @@ static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
1881 case QETH_CARD_TYPE_OSD: 1868 case QETH_CARD_TYPE_OSD:
1882 case QETH_CARD_TYPE_OSM: 1869 case QETH_CARD_TYPE_OSM:
1883 case QETH_CARD_TYPE_OSX: 1870 case QETH_CARD_TYPE_OSX:
1884 return ((mtu >= 576) && (mtu <= 61440));
1885 case QETH_CARD_TYPE_IQD: 1871 case QETH_CARD_TYPE_IQD:
1886 return ((mtu >= 576) && 1872 return ((mtu >= 576) &&
1887 (mtu <= card->info.max_mtu + 4096 - 32)); 1873 (mtu <= card->info.max_mtu));
1888 case QETH_CARD_TYPE_OSN: 1874 case QETH_CARD_TYPE_OSN:
1889 case QETH_CARD_TYPE_UNKNOWN: 1875 case QETH_CARD_TYPE_UNKNOWN:
1890 default: 1876 default:
@@ -1907,7 +1893,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1907 memcpy(&card->token.ulp_filter_r, 1893 memcpy(&card->token.ulp_filter_r,
1908 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 1894 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
1909 QETH_MPC_TOKEN_LENGTH); 1895 QETH_MPC_TOKEN_LENGTH);
1910 if (qeth_get_mtu_out_of_mpc(card->info.type)) { 1896 if (card->info.type == QETH_CARD_TYPE_IQD) {
1911 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 1897 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
1912 mtu = qeth_get_mtu_outof_framesize(framesize); 1898 mtu = qeth_get_mtu_outof_framesize(framesize);
1913 if (!mtu) { 1899 if (!mtu) {
@@ -1915,12 +1901,21 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
1915 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); 1901 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
1916 return 0; 1902 return 0;
1917 } 1903 }
1918 card->info.max_mtu = mtu; 1904 if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
1905 /* frame size has changed */
1906 if (card->dev &&
1907 ((card->dev->mtu == card->info.initial_mtu) ||
1908 (card->dev->mtu > mtu)))
1909 card->dev->mtu = mtu;
1910 qeth_free_qdio_buffers(card);
1911 }
1919 card->info.initial_mtu = mtu; 1912 card->info.initial_mtu = mtu;
1913 card->info.max_mtu = mtu;
1920 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE; 1914 card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
1921 } else { 1915 } else {
1922 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card); 1916 card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
1923 card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type); 1917 card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
1918 iob->data);
1924 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; 1919 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1925 } 1920 }
1926 1921
@@ -3775,6 +3770,47 @@ static inline int qeth_get_qdio_q_format(struct qeth_card *card)
3775 } 3770 }
3776} 3771}
3777 3772
3773static void qeth_determine_capabilities(struct qeth_card *card)
3774{
3775 int rc;
3776 int length;
3777 char *prcd;
3778 struct ccw_device *ddev;
3779 int ddev_offline = 0;
3780
3781 QETH_DBF_TEXT(SETUP, 2, "detcapab");
3782 ddev = CARD_DDEV(card);
3783 if (!ddev->online) {
3784 ddev_offline = 1;
3785 rc = ccw_device_set_online(ddev);
3786 if (rc) {
3787 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3788 goto out;
3789 }
3790 }
3791
3792 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
3793 if (rc) {
3794 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
3795 dev_name(&card->gdev->dev), rc);
3796 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3797 goto out_offline;
3798 }
3799 qeth_configure_unitaddr(card, prcd);
3800 qeth_configure_blkt_default(card, prcd);
3801 kfree(prcd);
3802
3803 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
3804 if (rc)
3805 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3806
3807out_offline:
3808 if (ddev_offline == 1)
3809 ccw_device_set_offline(ddev);
3810out:
3811 return;
3812}
3813
3778static int qeth_qdio_establish(struct qeth_card *card) 3814static int qeth_qdio_establish(struct qeth_card *card)
3779{ 3815{
3780 struct qdio_initialize init_data; 3816 struct qdio_initialize init_data;
@@ -3905,6 +3941,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
3905 3941
3906 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3942 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3907 atomic_set(&card->force_alloc_skb, 0); 3943 atomic_set(&card->force_alloc_skb, 0);
3944 qeth_get_channel_path_desc(card);
3908retry: 3945retry:
3909 if (retries) 3946 if (retries)
3910 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 3947 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -3933,6 +3970,7 @@ retriable:
3933 else 3970 else
3934 goto retry; 3971 goto retry;
3935 } 3972 }
3973 qeth_determine_capabilities(card);
3936 qeth_init_tokens(card); 3974 qeth_init_tokens(card);
3937 qeth_init_func_level(card); 3975 qeth_init_func_level(card);
3938 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); 3976 rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -4202,41 +4240,6 @@ void qeth_core_free_discipline(struct qeth_card *card)
4202 card->discipline.ccwgdriver = NULL; 4240 card->discipline.ccwgdriver = NULL;
4203} 4241}
4204 4242
4205static void qeth_determine_capabilities(struct qeth_card *card)
4206{
4207 int rc;
4208 int length;
4209 char *prcd;
4210
4211 QETH_DBF_TEXT(SETUP, 2, "detcapab");
4212 rc = ccw_device_set_online(CARD_DDEV(card));
4213 if (rc) {
4214 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
4215 goto out;
4216 }
4217
4218
4219 rc = qeth_read_conf_data(card, (void **) &prcd, &length);
4220 if (rc) {
4221 QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
4222 dev_name(&card->gdev->dev), rc);
4223 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
4224 goto out_offline;
4225 }
4226 qeth_configure_unitaddr(card, prcd);
4227 qeth_configure_blkt_default(card, prcd);
4228 kfree(prcd);
4229
4230 rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
4231 if (rc)
4232 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
4233
4234out_offline:
4235 ccw_device_set_offline(CARD_DDEV(card));
4236out:
4237 return;
4238}
4239
4240static int qeth_core_probe_device(struct ccwgroup_device *gdev) 4243static int qeth_core_probe_device(struct ccwgroup_device *gdev)
4241{ 4244{
4242 struct qeth_card *card; 4245 struct qeth_card *card;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7a7a1b664781..ada0fe782373 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -573,13 +573,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
573 case IPA_RC_L2_DUP_LAYER3_MAC: 573 case IPA_RC_L2_DUP_LAYER3_MAC:
574 dev_warn(&card->gdev->dev, 574 dev_warn(&card->gdev->dev,
575 "MAC address %pM already exists\n", 575 "MAC address %pM already exists\n",
576 card->dev->dev_addr); 576 cmd->data.setdelmac.mac);
577 break; 577 break;
578 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: 578 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
579 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: 579 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
580 dev_warn(&card->gdev->dev, 580 dev_warn(&card->gdev->dev,
581 "MAC address %pM is not authorized\n", 581 "MAC address %pM is not authorized\n",
582 card->dev->dev_addr); 582 cmd->data.setdelmac.mac);
583 break; 583 break;
584 default: 584 default:
585 break; 585 break;
@@ -831,12 +831,14 @@ tx_drop:
831 return NETDEV_TX_OK; 831 return NETDEV_TX_OK;
832} 832}
833 833
834static int qeth_l2_open(struct net_device *dev) 834static int __qeth_l2_open(struct net_device *dev)
835{ 835{
836 struct qeth_card *card = dev->ml_priv; 836 struct qeth_card *card = dev->ml_priv;
837 int rc = 0; 837 int rc = 0;
838 838
839 QETH_CARD_TEXT(card, 4, "qethopen"); 839 QETH_CARD_TEXT(card, 4, "qethopen");
840 if (card->state == CARD_STATE_UP)
841 return rc;
840 if (card->state != CARD_STATE_SOFTSETUP) 842 if (card->state != CARD_STATE_SOFTSETUP)
841 return -ENODEV; 843 return -ENODEV;
842 844
@@ -857,6 +859,18 @@ static int qeth_l2_open(struct net_device *dev)
857 return rc; 859 return rc;
858} 860}
859 861
862static int qeth_l2_open(struct net_device *dev)
863{
864 struct qeth_card *card = dev->ml_priv;
865
866 QETH_CARD_TEXT(card, 5, "qethope_");
867 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
868 QETH_CARD_TEXT(card, 3, "openREC");
869 return -ERESTARTSYS;
870 }
871 return __qeth_l2_open(dev);
872}
873
860static int qeth_l2_stop(struct net_device *dev) 874static int qeth_l2_stop(struct net_device *dev)
861{ 875{
862 struct qeth_card *card = dev->ml_priv; 876 struct qeth_card *card = dev->ml_priv;
@@ -1046,7 +1060,7 @@ contin:
1046 if (recover_flag == CARD_STATE_RECOVER) { 1060 if (recover_flag == CARD_STATE_RECOVER) {
1047 if (recovery_mode && 1061 if (recovery_mode &&
1048 card->info.type != QETH_CARD_TYPE_OSN) { 1062 card->info.type != QETH_CARD_TYPE_OSN) {
1049 qeth_l2_open(card->dev); 1063 __qeth_l2_open(card->dev);
1050 } else { 1064 } else {
1051 rtnl_lock(); 1065 rtnl_lock();
1052 dev_open(card->dev); 1066 dev_open(card->dev);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e227e465bfc4..d09b0c44fc3d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2998,7 +2998,9 @@ static inline void qeth_l3_hdr_csum(struct qeth_card *card,
2998 */ 2998 */
2999 if (iph->protocol == IPPROTO_UDP) 2999 if (iph->protocol == IPPROTO_UDP)
3000 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP; 3000 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
3001 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ; 3001 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
3002 QETH_HDR_EXT_CSUM_HDR_REQ;
3003 iph->check = 0;
3002 if (card->options.performance_stats) 3004 if (card->options.performance_stats)
3003 card->perf_stats.tx_csum++; 3005 card->perf_stats.tx_csum++;
3004} 3006}
@@ -3240,12 +3242,14 @@ tx_drop:
3240 return NETDEV_TX_OK; 3242 return NETDEV_TX_OK;
3241} 3243}
3242 3244
3243static int qeth_l3_open(struct net_device *dev) 3245static int __qeth_l3_open(struct net_device *dev)
3244{ 3246{
3245 struct qeth_card *card = dev->ml_priv; 3247 struct qeth_card *card = dev->ml_priv;
3246 int rc = 0; 3248 int rc = 0;
3247 3249
3248 QETH_CARD_TEXT(card, 4, "qethopen"); 3250 QETH_CARD_TEXT(card, 4, "qethopen");
3251 if (card->state == CARD_STATE_UP)
3252 return rc;
3249 if (card->state != CARD_STATE_SOFTSETUP) 3253 if (card->state != CARD_STATE_SOFTSETUP)
3250 return -ENODEV; 3254 return -ENODEV;
3251 card->data.state = CH_STATE_UP; 3255 card->data.state = CH_STATE_UP;
@@ -3260,6 +3264,18 @@ static int qeth_l3_open(struct net_device *dev)
3260 return rc; 3264 return rc;
3261} 3265}
3262 3266
3267static int qeth_l3_open(struct net_device *dev)
3268{
3269 struct qeth_card *card = dev->ml_priv;
3270
3271 QETH_CARD_TEXT(card, 5, "qethope_");
3272 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
3273 QETH_CARD_TEXT(card, 3, "openREC");
3274 return -ERESTARTSYS;
3275 }
3276 return __qeth_l3_open(dev);
3277}
3278
3263static int qeth_l3_stop(struct net_device *dev) 3279static int qeth_l3_stop(struct net_device *dev)
3264{ 3280{
3265 struct qeth_card *card = dev->ml_priv; 3281 struct qeth_card *card = dev->ml_priv;
@@ -3564,7 +3580,7 @@ contin:
3564 netif_carrier_off(card->dev); 3580 netif_carrier_off(card->dev);
3565 if (recover_flag == CARD_STATE_RECOVER) { 3581 if (recover_flag == CARD_STATE_RECOVER) {
3566 if (recovery_mode) 3582 if (recovery_mode)
3567 qeth_l3_open(card->dev); 3583 __qeth_l3_open(card->dev);
3568 else { 3584 else {
3569 rtnl_lock(); 3585 rtnl_lock();
3570 dev_open(card->dev); 3586 dev_open(card->dev);
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 65e1cf104943..207b7d742443 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -60,7 +60,7 @@ static struct iucv_handler smsg_handler = {
60static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8], 60static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
61 u8 ipuser[16]) 61 u8 ipuser[16])
62{ 62{
63 if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0) 63 if (strncmp(ipvmid, "*MSG ", 8) != 0)
64 return -EINVAL; 64 return -EINVAL;
65 /* Path pending from *MSG. */ 65 /* Path pending from *MSG. */
66 return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); 66 return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 475c31ae985c..77b26f5b9c33 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** O.S : Linux 3** O.S : Linux
4** FILE NAME : arcmsr.h 4** FILE NAME : arcmsr.h
5** BY : Erich Chen 5** BY : Nick Cheng
6** Description: SCSI RAID Device Driver for 6** Description: SCSI RAID Device Driver for
7** ARECA RAID Host adapter 7** ARECA RAID Host adapter
8******************************************************************************* 8*******************************************************************************
@@ -46,8 +46,12 @@
46struct device_attribute; 46struct device_attribute;
47/*The limit of outstanding scsi command that firmware can handle*/ 47/*The limit of outstanding scsi command that firmware can handle*/
48#define ARCMSR_MAX_OUTSTANDING_CMD 256 48#define ARCMSR_MAX_OUTSTANDING_CMD 256
49#define ARCMSR_MAX_FREECCB_NUM 320 49#ifdef CONFIG_XEN
50#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/02/02" 50 #define ARCMSR_MAX_FREECCB_NUM 160
51#else
52 #define ARCMSR_MAX_FREECCB_NUM 320
53#endif
54#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/08/05"
51#define ARCMSR_SCSI_INITIATOR_ID 255 55#define ARCMSR_SCSI_INITIATOR_ID 255
52#define ARCMSR_MAX_XFER_SECTORS 512 56#define ARCMSR_MAX_XFER_SECTORS 512
53#define ARCMSR_MAX_XFER_SECTORS_B 4096 57#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -60,7 +64,6 @@ struct device_attribute;
60#define ARCMSR_MAX_HBB_POSTQUEUE 264 64#define ARCMSR_MAX_HBB_POSTQUEUE 264
61#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */ 65#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
62#define ARCMSR_CDB_SG_PAGE_LENGTH 256 66#define ARCMSR_CDB_SG_PAGE_LENGTH 256
63#define SCSI_CMD_ARECA_SPECIFIC 0xE1
64#ifndef PCI_DEVICE_ID_ARECA_1880 67#ifndef PCI_DEVICE_ID_ARECA_1880
65#define PCI_DEVICE_ID_ARECA_1880 0x1880 68#define PCI_DEVICE_ID_ARECA_1880 0x1880
66 #endif 69 #endif
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c50c436..acdae33de521 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** O.S : Linux 3** O.S : Linux
4** FILE NAME : arcmsr_attr.c 4** FILE NAME : arcmsr_attr.c
5** BY : Erich Chen 5** BY : Nick Cheng
6** Description: attributes exported to sysfs and device host 6** Description: attributes exported to sysfs and device host
7******************************************************************************* 7*******************************************************************************
8** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved 8** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 1cadcd6b7da6..984bd527c6c9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** O.S : Linux 3** O.S : Linux
4** FILE NAME : arcmsr_hba.c 4** FILE NAME : arcmsr_hba.c
5** BY : Erich Chen 5** BY : Nick Cheng
6** Description: SCSI RAID Device Driver for 6** Description: SCSI RAID Device Driver for
7** ARECA RAID Host adapter 7** ARECA RAID Host adapter
8******************************************************************************* 8*******************************************************************************
@@ -76,7 +76,7 @@ MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapte
76MODULE_LICENSE("Dual BSD/GPL"); 76MODULE_LICENSE("Dual BSD/GPL");
77MODULE_VERSION(ARCMSR_DRIVER_VERSION); 77MODULE_VERSION(ARCMSR_DRIVER_VERSION);
78static int sleeptime = 10; 78static int sleeptime = 10;
79static int retrycount = 30; 79static int retrycount = 12;
80wait_queue_head_t wait_q; 80wait_queue_head_t wait_q;
81static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, 81static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
82 struct scsi_cmnd *cmd); 82 struct scsi_cmnd *cmd);
@@ -187,7 +187,6 @@ int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
187 if (isleep > 0) { 187 if (isleep > 0) {
188 msleep(isleep*1000); 188 msleep(isleep*1000);
189 } 189 }
190 printk(KERN_NOTICE "wake-up\n");
191 return 0; 190 return 0;
192} 191}
193 192
@@ -921,7 +920,6 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
921} 920}
922 921
923static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) 922static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
924
925{ 923{
926 int id, lun; 924 int id, lun;
927 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { 925 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
@@ -948,7 +946,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
948 , pCCB->startdone 946 , pCCB->startdone
949 , atomic_read(&acb->ccboutstandingcount)); 947 , atomic_read(&acb->ccboutstandingcount));
950 return; 948 return;
951 } 949 }
952 arcmsr_report_ccb_state(acb, pCCB, error); 950 arcmsr_report_ccb_state(acb, pCCB, error);
953} 951}
954 952
@@ -981,7 +979,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
981 case ACB_ADAPTER_TYPE_B: { 979 case ACB_ADAPTER_TYPE_B: {
982 struct MessageUnit_B *reg = acb->pmuB; 980 struct MessageUnit_B *reg = acb->pmuB;
983 /*clear all outbound posted Q*/ 981 /*clear all outbound posted Q*/
984 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */ 982 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
985 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { 983 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
986 if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) { 984 if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
987 writel(0, &reg->done_qbuffer[i]); 985 writel(0, &reg->done_qbuffer[i]);
@@ -1511,7 +1509,6 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1511 arcmsr_drain_donequeue(acb, pCCB, error); 1509 arcmsr_drain_donequeue(acb, pCCB, error);
1512 } 1510 }
1513} 1511}
1514
1515static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) 1512static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1516{ 1513{
1517 uint32_t index; 1514 uint32_t index;
@@ -2106,10 +2103,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
2106 if (atomic_read(&acb->ccboutstandingcount) >= 2103 if (atomic_read(&acb->ccboutstandingcount) >=
2107 ARCMSR_MAX_OUTSTANDING_CMD) 2104 ARCMSR_MAX_OUTSTANDING_CMD)
2108 return SCSI_MLQUEUE_HOST_BUSY; 2105 return SCSI_MLQUEUE_HOST_BUSY;
2109 if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
2110 printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n");
2111 return 0;
2112 }
2113 ccb = arcmsr_get_freeccb(acb); 2106 ccb = arcmsr_get_freeccb(acb);
2114 if (!ccb) 2107 if (!ccb)
2115 return SCSI_MLQUEUE_HOST_BUSY; 2108 return SCSI_MLQUEUE_HOST_BUSY;
@@ -2393,6 +2386,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
2393 int index, rtn; 2386 int index, rtn;
2394 bool error; 2387 bool error;
2395 polling_hbb_ccb_retry: 2388 polling_hbb_ccb_retry:
2389
2396 poll_count++; 2390 poll_count++;
2397 /* clear doorbell interrupt */ 2391 /* clear doorbell interrupt */
2398 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); 2392 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -2663,6 +2657,7 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
2663{ 2657{
2664 struct MessageUnit_A __iomem *reg = acb->pmuA; 2658 struct MessageUnit_A __iomem *reg = acb->pmuA;
2665 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ 2659 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
2660 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2666 return; 2661 return;
2667 } else { 2662 } else {
2668 acb->fw_flag = FW_NORMAL; 2663 acb->fw_flag = FW_NORMAL;
@@ -2670,8 +2665,10 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
2670 atomic_set(&acb->rq_map_token, 16); 2665 atomic_set(&acb->rq_map_token, 16);
2671 } 2666 }
2672 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); 2667 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2673 if (atomic_dec_and_test(&acb->rq_map_token)) 2668 if (atomic_dec_and_test(&acb->rq_map_token)) {
2669 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2674 return; 2670 return;
2671 }
2675 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 2672 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2676 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); 2673 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2677 } 2674 }
@@ -2682,15 +2679,18 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
2682{ 2679{
2683 struct MessageUnit_B __iomem *reg = acb->pmuB; 2680 struct MessageUnit_B __iomem *reg = acb->pmuB;
2684 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ 2681 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
2682 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2685 return; 2683 return;
2686 } else { 2684 } else {
2687 acb->fw_flag = FW_NORMAL; 2685 acb->fw_flag = FW_NORMAL;
2688 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) { 2686 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
2689 atomic_set(&acb->rq_map_token,16); 2687 atomic_set(&acb->rq_map_token, 16);
2690 } 2688 }
2691 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); 2689 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2692 if(atomic_dec_and_test(&acb->rq_map_token)) 2690 if (atomic_dec_and_test(&acb->rq_map_token)) {
2691 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2693 return; 2692 return;
2693 }
2694 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); 2694 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
2695 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); 2695 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2696 } 2696 }
@@ -2701,6 +2701,7 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
2701{ 2701{
2702 struct MessageUnit_C __iomem *reg = acb->pmuC; 2702 struct MessageUnit_C __iomem *reg = acb->pmuC;
2703 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) { 2703 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
2704 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2704 return; 2705 return;
2705 } else { 2706 } else {
2706 acb->fw_flag = FW_NORMAL; 2707 acb->fw_flag = FW_NORMAL;
@@ -2708,8 +2709,10 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
2708 atomic_set(&acb->rq_map_token, 16); 2709 atomic_set(&acb->rq_map_token, 16);
2709 } 2710 }
2710 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); 2711 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2711 if (atomic_dec_and_test(&acb->rq_map_token)) 2712 if (atomic_dec_and_test(&acb->rq_map_token)) {
2713 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2712 return; 2714 return;
2715 }
2713 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 2716 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2714 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); 2717 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2715 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); 2718 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
@@ -2897,6 +2900,8 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
2897 uint32_t intmask_org; 2900 uint32_t intmask_org;
2898 uint8_t rtnval = 0x00; 2901 uint8_t rtnval = 0x00;
2899 int i = 0; 2902 int i = 0;
2903 unsigned long flags;
2904
2900 if (atomic_read(&acb->ccboutstandingcount) != 0) { 2905 if (atomic_read(&acb->ccboutstandingcount) != 0) {
2901 /* disable all outbound interrupt */ 2906 /* disable all outbound interrupt */
2902 intmask_org = arcmsr_disable_outbound_ints(acb); 2907 intmask_org = arcmsr_disable_outbound_ints(acb);
@@ -2907,7 +2912,12 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
2907 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 2912 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2908 ccb = acb->pccb_pool[i]; 2913 ccb = acb->pccb_pool[i];
2909 if (ccb->startdone == ARCMSR_CCB_START) { 2914 if (ccb->startdone == ARCMSR_CCB_START) {
2910 arcmsr_ccb_complete(ccb); 2915 scsi_dma_unmap(ccb->pcmd);
2916 ccb->startdone = ARCMSR_CCB_DONE;
2917 ccb->ccb_flags = 0;
2918 spin_lock_irqsave(&acb->ccblist_lock, flags);
2919 list_add_tail(&ccb->list, &acb->ccb_free_list);
2920 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
2911 } 2921 }
2912 } 2922 }
2913 atomic_set(&acb->ccboutstandingcount, 0); 2923 atomic_set(&acb->ccboutstandingcount, 0);
@@ -2920,8 +2930,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
2920 2930
2921static int arcmsr_bus_reset(struct scsi_cmnd *cmd) 2931static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2922{ 2932{
2923 struct AdapterControlBlock *acb = 2933 struct AdapterControlBlock *acb;
2924 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2925 uint32_t intmask_org, outbound_doorbell; 2934 uint32_t intmask_org, outbound_doorbell;
2926 int retry_count = 0; 2935 int retry_count = 0;
2927 int rtn = FAILED; 2936 int rtn = FAILED;
@@ -2971,31 +2980,16 @@ sleep_again:
2971 atomic_set(&acb->rq_map_token, 16); 2980 atomic_set(&acb->rq_map_token, 16);
2972 atomic_set(&acb->ante_token_value, 16); 2981 atomic_set(&acb->ante_token_value, 16);
2973 acb->fw_flag = FW_NORMAL; 2982 acb->fw_flag = FW_NORMAL;
2974 init_timer(&acb->eternal_timer); 2983 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2975 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
2976 acb->eternal_timer.data = (unsigned long) acb;
2977 acb->eternal_timer.function = &arcmsr_request_device_map;
2978 add_timer(&acb->eternal_timer);
2979 acb->acb_flags &= ~ACB_F_BUS_RESET; 2984 acb->acb_flags &= ~ACB_F_BUS_RESET;
2980 rtn = SUCCESS; 2985 rtn = SUCCESS;
2981 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); 2986 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
2982 } else { 2987 } else {
2983 acb->acb_flags &= ~ACB_F_BUS_RESET; 2988 acb->acb_flags &= ~ACB_F_BUS_RESET;
2984 if (atomic_read(&acb->rq_map_token) == 0) { 2989 atomic_set(&acb->rq_map_token, 16);
2985 atomic_set(&acb->rq_map_token, 16); 2990 atomic_set(&acb->ante_token_value, 16);
2986 atomic_set(&acb->ante_token_value, 16); 2991 acb->fw_flag = FW_NORMAL;
2987 acb->fw_flag = FW_NORMAL; 2992 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
2988 init_timer(&acb->eternal_timer);
2989 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
2990 acb->eternal_timer.data = (unsigned long) acb;
2991 acb->eternal_timer.function = &arcmsr_request_device_map;
2992 add_timer(&acb->eternal_timer);
2993 } else {
2994 atomic_set(&acb->rq_map_token, 16);
2995 atomic_set(&acb->ante_token_value, 16);
2996 acb->fw_flag = FW_NORMAL;
2997 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
2998 }
2999 rtn = SUCCESS; 2993 rtn = SUCCESS;
3000 } 2994 }
3001 break; 2995 break;
@@ -3007,21 +3001,10 @@ sleep_again:
3007 rtn = FAILED; 3001 rtn = FAILED;
3008 } else { 3002 } else {
3009 acb->acb_flags &= ~ACB_F_BUS_RESET; 3003 acb->acb_flags &= ~ACB_F_BUS_RESET;
3010 if (atomic_read(&acb->rq_map_token) == 0) { 3004 atomic_set(&acb->rq_map_token, 16);
3011 atomic_set(&acb->rq_map_token, 16); 3005 atomic_set(&acb->ante_token_value, 16);
3012 atomic_set(&acb->ante_token_value, 16); 3006 acb->fw_flag = FW_NORMAL;
3013 acb->fw_flag = FW_NORMAL; 3007 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3014 init_timer(&acb->eternal_timer);
3015 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
3016 acb->eternal_timer.data = (unsigned long) acb;
3017 acb->eternal_timer.function = &arcmsr_request_device_map;
3018 add_timer(&acb->eternal_timer);
3019 } else {
3020 atomic_set(&acb->rq_map_token, 16);
3021 atomic_set(&acb->ante_token_value, 16);
3022 acb->fw_flag = FW_NORMAL;
3023 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3024 }
3025 rtn = SUCCESS; 3008 rtn = SUCCESS;
3026 } 3009 }
3027 break; 3010 break;
@@ -3067,31 +3050,16 @@ sleep:
3067 atomic_set(&acb->rq_map_token, 16); 3050 atomic_set(&acb->rq_map_token, 16);
3068 atomic_set(&acb->ante_token_value, 16); 3051 atomic_set(&acb->ante_token_value, 16);
3069 acb->fw_flag = FW_NORMAL; 3052 acb->fw_flag = FW_NORMAL;
3070 init_timer(&acb->eternal_timer); 3053 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3071 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
3072 acb->eternal_timer.data = (unsigned long) acb;
3073 acb->eternal_timer.function = &arcmsr_request_device_map;
3074 add_timer(&acb->eternal_timer);
3075 acb->acb_flags &= ~ACB_F_BUS_RESET; 3054 acb->acb_flags &= ~ACB_F_BUS_RESET;
3076 rtn = SUCCESS; 3055 rtn = SUCCESS;
3077 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); 3056 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3078 } else { 3057 } else {
3079 acb->acb_flags &= ~ACB_F_BUS_RESET; 3058 acb->acb_flags &= ~ACB_F_BUS_RESET;
3080 if (atomic_read(&acb->rq_map_token) == 0) { 3059 atomic_set(&acb->rq_map_token, 16);
3081 atomic_set(&acb->rq_map_token, 16); 3060 atomic_set(&acb->ante_token_value, 16);
3082 atomic_set(&acb->ante_token_value, 16); 3061 acb->fw_flag = FW_NORMAL;
3083 acb->fw_flag = FW_NORMAL; 3062 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3084 init_timer(&acb->eternal_timer);
3085 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
3086 acb->eternal_timer.data = (unsigned long) acb;
3087 acb->eternal_timer.function = &arcmsr_request_device_map;
3088 add_timer(&acb->eternal_timer);
3089 } else {
3090 atomic_set(&acb->rq_map_token, 16);
3091 atomic_set(&acb->ante_token_value, 16);
3092 acb->fw_flag = FW_NORMAL;
3093 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3094 }
3095 rtn = SUCCESS; 3063 rtn = SUCCESS;
3096 } 3064 }
3097 break; 3065 break;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d3c5905b22ec..9c5c8be72231 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7515,16 +7515,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7515{ 7515{
7516 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7516 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7517 volatile u32 int_reg; 7517 volatile u32 int_reg;
7518 int rc;
7519 7518
7520 ENTER; 7519 ENTER;
7521 ioa_cfg->pdev->state_saved = true; 7520 ioa_cfg->pdev->state_saved = true;
7522 rc = pci_restore_state(ioa_cfg->pdev); 7521 pci_restore_state(ioa_cfg->pdev);
7523
7524 if (rc != PCIBIOS_SUCCESSFUL) {
7525 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7526 return IPR_RC_JOB_CONTINUE;
7527 }
7528 7522
7529 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { 7523 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7530 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7524 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 5815cbeb27a6..9a7aaf5f1311 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -646,6 +646,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
646 646
647 spin_lock_irqsave(shost->host_lock, flags); 647 spin_lock_irqsave(shost->host_lock, flags);
648 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 648 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
649 shost->host_eh_scheduled = 0;
649 spin_unlock_irqrestore(shost->host_lock, flags); 650 spin_unlock_irqrestore(shost->host_lock, flags);
650 651
651 SAS_DPRINTK("Enter %s\n", __func__); 652 SAS_DPRINTK("Enter %s\n", __func__);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b2a817055b8b..9ead0399808a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2176,9 +2176,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2176 /* adjust hba_queue_depth, reply_free_queue_depth, 2176 /* adjust hba_queue_depth, reply_free_queue_depth,
2177 * and queue_size 2177 * and queue_size
2178 */ 2178 */
2179 ioc->hba_queue_depth -= queue_diff; 2179 ioc->hba_queue_depth -= (queue_diff / 2);
2180 ioc->reply_free_queue_depth -= queue_diff; 2180 ioc->reply_free_queue_depth -= (queue_diff / 2);
2181 queue_size -= queue_diff; 2181 queue_size = facts->MaxReplyDescriptorPostQueueDepth;
2182 } 2182 }
2183 ioc->reply_post_queue_depth = queue_size; 2183 ioc->reply_post_queue_depth = queue_size;
2184 2184
@@ -3941,6 +3941,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
3941static void 3941static void
3942_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) 3942_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3943{ 3943{
3944 mpt2sas_scsih_reset_handler(ioc, reset_phase);
3945 mpt2sas_ctl_reset_handler(ioc, reset_phase);
3944 switch (reset_phase) { 3946 switch (reset_phase) {
3945 case MPT2_IOC_PRE_RESET: 3947 case MPT2_IOC_PRE_RESET:
3946 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " 3948 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -3971,8 +3973,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3971 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); 3973 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
3972 break; 3974 break;
3973 } 3975 }
3974 mpt2sas_scsih_reset_handler(ioc, reset_phase);
3975 mpt2sas_ctl_reset_handler(ioc, reset_phase);
3976} 3976}
3977 3977
3978/** 3978/**
@@ -4026,6 +4026,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4026{ 4026{
4027 int r; 4027 int r;
4028 unsigned long flags; 4028 unsigned long flags;
4029 u8 pe_complete = ioc->wait_for_port_enable_to_complete;
4029 4030
4030 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, 4031 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
4031 __func__)); 4032 __func__));
@@ -4068,6 +4069,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4068 if (r) 4069 if (r)
4069 goto out; 4070 goto out;
4070 _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET); 4071 _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
4072
4073 /* If this hard reset is called while port enable is active, then
4074 * there is no reason to call make_ioc_operational
4075 */
4076 if (pe_complete) {
4077 r = -EFAULT;
4078 goto out;
4079 }
4071 r = _base_make_ioc_operational(ioc, sleep_flag); 4080 r = _base_make_ioc_operational(ioc, sleep_flag);
4072 if (!r) 4081 if (!r)
4073 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); 4082 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index eda347c57979..5ded3db6e316 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info)
819} 819}
820 820
821/** 821/**
822 * mptscsih_get_scsi_lookup - returns scmd entry 822 * _scsih_scsi_lookup_get - returns scmd entry
823 * @ioc: per adapter object 823 * @ioc: per adapter object
824 * @smid: system request message index 824 * @smid: system request message index
825 * 825 *
@@ -832,6 +832,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
832} 832}
833 833
834/** 834/**
835 * _scsih_scsi_lookup_get_clear - returns scmd entry
836 * @ioc: per adapter object
837 * @smid: system request message index
838 *
839 * Returns the smid stored scmd pointer.
840 * Then will derefrence the stored scmd pointer.
841 */
842static inline struct scsi_cmnd *
843_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
844{
845 unsigned long flags;
846 struct scsi_cmnd *scmd;
847
848 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
849 scmd = ioc->scsi_lookup[smid - 1].scmd;
850 ioc->scsi_lookup[smid - 1].scmd = NULL;
851 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
852
853 return scmd;
854}
855
856/**
835 * _scsih_scsi_lookup_find_by_scmd - scmd lookup 857 * _scsih_scsi_lookup_find_by_scmd - scmd lookup
836 * @ioc: per adapter object 858 * @ioc: per adapter object
837 * @smid: system request message index 859 * @smid: system request message index
@@ -2981,9 +3003,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
2981 u16 handle; 3003 u16 handle;
2982 3004
2983 for (i = 0 ; i < event_data->NumEntries; i++) { 3005 for (i = 0 ; i < event_data->NumEntries; i++) {
2984 if (event_data->PHY[i].PhyStatus &
2985 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
2986 continue;
2987 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 3006 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
2988 if (!handle) 3007 if (!handle)
2989 continue; 3008 continue;
@@ -3210,7 +3229,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
3210 u16 count = 0; 3229 u16 count = 0;
3211 3230
3212 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 3231 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
3213 scmd = _scsih_scsi_lookup_get(ioc, smid); 3232 scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
3214 if (!scmd) 3233 if (!scmd)
3215 continue; 3234 continue;
3216 count++; 3235 count++;
@@ -3804,7 +3823,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
3804 u32 response_code = 0; 3823 u32 response_code = 0;
3805 3824
3806 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 3825 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
3807 scmd = _scsih_scsi_lookup_get(ioc, smid); 3826 scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
3808 if (scmd == NULL) 3827 if (scmd == NULL)
3809 return 1; 3828 return 1;
3810 3829
@@ -5005,6 +5024,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
5005 event_data); 5024 event_data);
5006#endif 5025#endif
5007 5026
5027 /* In MPI Revision K (0xC), the internal device reset complete was
5028 * implemented, so avoid setting tm_busy flag for older firmware.
5029 */
5030 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
5031 return;
5032
5008 if (event_data->ReasonCode != 5033 if (event_data->ReasonCode !=
5009 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && 5034 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
5010 event_data->ReasonCode != 5035 event_data->ReasonCode !=
@@ -5099,6 +5124,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
5099 struct fw_event_work *fw_event) 5124 struct fw_event_work *fw_event)
5100{ 5125{
5101 struct scsi_cmnd *scmd; 5126 struct scsi_cmnd *scmd;
5127 struct scsi_device *sdev;
5102 u16 smid, handle; 5128 u16 smid, handle;
5103 u32 lun; 5129 u32 lun;
5104 struct MPT2SAS_DEVICE *sas_device_priv_data; 5130 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -5109,12 +5135,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
5109 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; 5135 Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
5110#endif 5136#endif
5111 u16 ioc_status; 5137 u16 ioc_status;
5138 unsigned long flags;
5139 int r;
5140
5112 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: " 5141 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
5113 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum, 5142 "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
5114 event_data->PortWidth)); 5143 event_data->PortWidth));
5115 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name, 5144 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
5116 __func__)); 5145 __func__));
5117 5146
5147 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5148 ioc->broadcast_aen_busy = 0;
5118 termination_count = 0; 5149 termination_count = 0;
5119 query_count = 0; 5150 query_count = 0;
5120 mpi_reply = ioc->tm_cmds.reply; 5151 mpi_reply = ioc->tm_cmds.reply;
@@ -5122,7 +5153,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
5122 scmd = _scsih_scsi_lookup_get(ioc, smid); 5153 scmd = _scsih_scsi_lookup_get(ioc, smid);
5123 if (!scmd) 5154 if (!scmd)
5124 continue; 5155 continue;
5125 sas_device_priv_data = scmd->device->hostdata; 5156 sdev = scmd->device;
5157 sas_device_priv_data = sdev->hostdata;
5126 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) 5158 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
5127 continue; 5159 continue;
5128 /* skip hidden raid components */ 5160 /* skip hidden raid components */
@@ -5138,6 +5170,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
5138 lun = sas_device_priv_data->lun; 5170 lun = sas_device_priv_data->lun;
5139 query_count++; 5171 query_count++;
5140 5172
5173 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5141 mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, 5174 mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
5142 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL); 5175 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
5143 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 5176 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
@@ -5147,14 +5180,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
5147 (mpi_reply->ResponseCode == 5180 (mpi_reply->ResponseCode ==
5148 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || 5181 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
5149 mpi_reply->ResponseCode == 5182 mpi_reply->ResponseCode ==
5150 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) 5183 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
5184 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5151 continue; 5185 continue;
5152 5186 }
5153 mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, 5187 r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
5154 MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL); 5188 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
5189 scmd);
5190 if (r == FAILED)
5191 sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
5192 "scmd(%p)\n", scmd);
5155 termination_count += le32_to_cpu(mpi_reply->TerminationCount); 5193 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
5194 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5156 } 5195 }
5157 ioc->broadcast_aen_busy = 0; 5196 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5158 5197
5159 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT 5198 dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
5160 "%s - exit, query_count = %d termination_count = %d\n", 5199 "%s - exit, query_count = %d termination_count = %d\n",
@@ -6626,6 +6665,7 @@ _scsih_remove(struct pci_dev *pdev)
6626 destroy_workqueue(wq); 6665 destroy_workqueue(wq);
6627 6666
6628 /* release all the volumes */ 6667 /* release all the volumes */
6668 _scsih_ir_shutdown(ioc);
6629 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, 6669 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
6630 list) { 6670 list) {
6631 if (raid_device->starget) { 6671 if (raid_device->starget) {
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 300d59f389da..321cf3ae8630 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2228,12 +2228,7 @@ static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
2228 /* Once either bist or pci reset is done, restore PCI config 2228 /* Once either bist or pci reset is done, restore PCI config
2229 * space. If this fails, proceed with hard reset again 2229 * space. If this fails, proceed with hard reset again
2230 */ 2230 */
2231 if (pci_restore_state(pinstance->pdev)) { 2231 pci_restore_state(pinstance->pdev);
2232 pmcraid_info("config-space error resetting again\n");
2233 pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
2234 pmcraid_reset_alert(cmd);
2235 break;
2236 }
2237 2232
2238 /* fail all pending commands */ 2233 /* fail all pending commands */
2239 pmcraid_fail_outstanding_cmds(pinstance); 2234 pmcraid_fail_outstanding_cmds(pinstance);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b65e65aa07eb..e56730214c05 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -990,30 +990,51 @@ out:
990 990
991static void set_media_not_present(struct scsi_disk *sdkp) 991static void set_media_not_present(struct scsi_disk *sdkp)
992{ 992{
993 sdkp->media_present = 0; 993 if (sdkp->media_present)
994 sdkp->capacity = 0; 994 sdkp->device->changed = 1;
995 sdkp->device->changed = 1; 995
996 if (sdkp->device->removable) {
997 sdkp->media_present = 0;
998 sdkp->capacity = 0;
999 }
1000}
1001
1002static int media_not_present(struct scsi_disk *sdkp,
1003 struct scsi_sense_hdr *sshdr)
1004{
1005 if (!scsi_sense_valid(sshdr))
1006 return 0;
1007
1008 /* not invoked for commands that could return deferred errors */
1009 switch (sshdr->sense_key) {
1010 case UNIT_ATTENTION:
1011 case NOT_READY:
1012 /* medium not present */
1013 if (sshdr->asc == 0x3A) {
1014 set_media_not_present(sdkp);
1015 return 1;
1016 }
1017 }
1018 return 0;
996} 1019}
997 1020
998/** 1021/**
999 * sd_media_changed - check if our medium changed 1022 * sd_check_events - check media events
1000 * @disk: kernel device descriptor 1023 * @disk: kernel device descriptor
1024 * @clearing: disk events currently being cleared
1001 * 1025 *
1002 * Returns 0 if not applicable or no change; 1 if change 1026 * Returns mask of DISK_EVENT_*.
1003 * 1027 *
1004 * Note: this function is invoked from the block subsystem. 1028 * Note: this function is invoked from the block subsystem.
1005 **/ 1029 **/
1006static int sd_media_changed(struct gendisk *disk) 1030static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1007{ 1031{
1008 struct scsi_disk *sdkp = scsi_disk(disk); 1032 struct scsi_disk *sdkp = scsi_disk(disk);
1009 struct scsi_device *sdp = sdkp->device; 1033 struct scsi_device *sdp = sdkp->device;
1010 struct scsi_sense_hdr *sshdr = NULL; 1034 struct scsi_sense_hdr *sshdr = NULL;
1011 int retval; 1035 int retval;
1012 1036
1013 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n")); 1037 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
1014
1015 if (!sdp->removable)
1016 return 0;
1017 1038
1018 /* 1039 /*
1019 * If the device is offline, don't send any commands - just pretend as 1040 * If the device is offline, don't send any commands - just pretend as
@@ -1043,40 +1064,32 @@ static int sd_media_changed(struct gendisk *disk)
1043 sshdr); 1064 sshdr);
1044 } 1065 }
1045 1066
1046 if (retval) { 1067 /* failed to execute TUR, assume media not present */
1068 if (host_byte(retval)) {
1047 set_media_not_present(sdkp); 1069 set_media_not_present(sdkp);
1048 goto out; 1070 goto out;
1049 } 1071 }
1050 1072
1073 if (media_not_present(sdkp, sshdr))
1074 goto out;
1075
1051 /* 1076 /*
1052 * For removable scsi disk we have to recognise the presence 1077 * For removable scsi disk we have to recognise the presence
1053 * of a disk in the drive. This is kept in the struct scsi_disk 1078 * of a disk in the drive.
1054 * struct and tested at open ! Daniel Roche (dan@lectra.fr)
1055 */ 1079 */
1080 if (!sdkp->media_present)
1081 sdp->changed = 1;
1056 sdkp->media_present = 1; 1082 sdkp->media_present = 1;
1057
1058out: 1083out:
1059 /* 1084 /*
1060 * Report a media change under the following conditions: 1085 * sdp->changed is set under the following conditions:
1061 *
1062 * Medium is present now and wasn't present before.
1063 * Medium wasn't present before and is present now.
1064 * Medium was present at all times, but it changed while
1065 * we weren't looking (sdp->changed is set).
1066 * 1086 *
1067 * If there was no medium before and there is no medium now then 1087 * Medium present state has changed in either direction.
1068 * don't report a change, even if a medium was inserted and removed 1088 * Device has indicated UNIT_ATTENTION.
1069 * while we weren't looking.
1070 */ 1089 */
1071 retval = (sdkp->media_present != sdkp->previous_state ||
1072 (sdkp->media_present && sdp->changed));
1073 if (retval)
1074 sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
1075 sdkp->previous_state = sdkp->media_present;
1076
1077 /* sdp->changed indicates medium was changed or is not present */
1078 sdp->changed = !sdkp->media_present;
1079 kfree(sshdr); 1090 kfree(sshdr);
1091 retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
1092 sdp->changed = 0;
1080 return retval; 1093 return retval;
1081} 1094}
1082 1095
@@ -1169,7 +1182,7 @@ static const struct block_device_operations sd_fops = {
1169#ifdef CONFIG_COMPAT 1182#ifdef CONFIG_COMPAT
1170 .compat_ioctl = sd_compat_ioctl, 1183 .compat_ioctl = sd_compat_ioctl,
1171#endif 1184#endif
1172 .media_changed = sd_media_changed, 1185 .check_events = sd_check_events,
1173 .revalidate_disk = sd_revalidate_disk, 1186 .revalidate_disk = sd_revalidate_disk,
1174 .unlock_native_capacity = sd_unlock_native_capacity, 1187 .unlock_native_capacity = sd_unlock_native_capacity,
1175}; 1188};
@@ -1312,23 +1325,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1312 return good_bytes; 1325 return good_bytes;
1313} 1326}
1314 1327
1315static int media_not_present(struct scsi_disk *sdkp,
1316 struct scsi_sense_hdr *sshdr)
1317{
1318
1319 if (!scsi_sense_valid(sshdr))
1320 return 0;
1321 /* not invoked for commands that could return deferred errors */
1322 if (sshdr->sense_key != NOT_READY &&
1323 sshdr->sense_key != UNIT_ATTENTION)
1324 return 0;
1325 if (sshdr->asc != 0x3A) /* medium not present */
1326 return 0;
1327
1328 set_media_not_present(sdkp);
1329 return 1;
1330}
1331
1332/* 1328/*
1333 * spinup disk - called only in sd_revalidate_disk() 1329 * spinup disk - called only in sd_revalidate_disk()
1334 */ 1330 */
@@ -1503,7 +1499,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
1503 */ 1499 */
1504 if (sdp->removable && 1500 if (sdp->removable &&
1505 sense_valid && sshdr->sense_key == NOT_READY) 1501 sense_valid && sshdr->sense_key == NOT_READY)
1506 sdp->changed = 1; 1502 set_media_not_present(sdkp);
1507 1503
1508 /* 1504 /*
1509 * We used to set media_present to 0 here to indicate no media 1505 * We used to set media_present to 0 here to indicate no media
@@ -2389,8 +2385,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2389 2385
2390 gd->driverfs_dev = &sdp->sdev_gendev; 2386 gd->driverfs_dev = &sdp->sdev_gendev;
2391 gd->flags = GENHD_FL_EXT_DEVT; 2387 gd->flags = GENHD_FL_EXT_DEVT;
2392 if (sdp->removable) 2388 if (sdp->removable) {
2393 gd->flags |= GENHD_FL_REMOVABLE; 2389 gd->flags |= GENHD_FL_REMOVABLE;
2390 gd->events |= DISK_EVENT_MEDIA_CHANGE;
2391 }
2394 2392
2395 add_disk(gd); 2393 add_disk(gd);
2396 sd_dif_config_host(sdkp); 2394 sd_dif_config_host(sdkp);
@@ -2472,7 +2470,6 @@ static int sd_probe(struct device *dev)
2472 sdkp->disk = gd; 2470 sdkp->disk = gd;
2473 sdkp->index = index; 2471 sdkp->index = index;
2474 atomic_set(&sdkp->openers, 0); 2472 atomic_set(&sdkp->openers, 0);
2475 sdkp->previous_state = 1;
2476 2473
2477 if (!sdp->request_queue->rq_timeout) { 2474 if (!sdp->request_queue->rq_timeout) {
2478 if (sdp->type != TYPE_MOD) 2475 if (sdp->type != TYPE_MOD)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 55488faf0815..c9d8f6ca49e2 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -55,7 +55,6 @@ struct scsi_disk {
55 u8 media_present; 55 u8 media_present;
56 u8 write_prot; 56 u8 write_prot;
57 u8 protection_type;/* Data Integrity Field */ 57 u8 protection_type;/* Data Integrity Field */
58 unsigned previous_state : 1;
59 unsigned ATO : 1; /* state of disk ATO bit */ 58 unsigned ATO : 1; /* state of disk ATO bit */
60 unsigned WCE : 1; /* state of disk WCE bit */ 59 unsigned WCE : 1; /* state of disk WCE bit */
61 unsigned RCD : 1; /* state of disk RCD bit, unused */ 60 unsigned RCD : 1; /* state of disk RCD bit, unused */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index be6baf8ad704..aefadc6a1607 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -249,10 +249,6 @@ skip_tur:
249 cd->device->changed = 0; 249 cd->device->changed = 0;
250 } 250 }
251 251
252 /* for backward compatibility */
253 if (events & DISK_EVENT_MEDIA_CHANGE)
254 sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
255 GFP_KERNEL);
256 return events; 252 return events;
257} 253}
258 254
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index ceba593dc84f..04113e5304a0 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -101,7 +101,7 @@ static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
101 return NULL; 101 return NULL;
102 102
103 if (sfi_use_ioremap) 103 if (sfi_use_ioremap)
104 return ioremap(phys, size); 104 return ioremap_cache(phys, size);
105 else 105 else
106 return early_ioremap(phys, size); 106 return early_ioremap(phys, size);
107} 107}
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index de885a0f917a..f33e2dd97934 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -173,7 +173,8 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
173 return 0; 173 return 0;
174} 174}
175 175
176#define VALID(x) (x | 0x80) 176#define SENSE_VALID_FLAG 0x80
177#define VALID(x) (x | SENSE_VALID_FLAG)
177 178
178static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = { 179static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
179 [IRQ_TYPE_EDGE_FALLING] = VALID(0), 180 [IRQ_TYPE_EDGE_FALLING] = VALID(0),
@@ -201,7 +202,8 @@ static int intc_set_type(struct irq_data *data, unsigned int type)
201 ihp = intc_find_irq(d->sense, d->nr_sense, irq); 202 ihp = intc_find_irq(d->sense, d->nr_sense, irq);
202 if (ihp) { 203 if (ihp) {
203 addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0); 204 addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
204 intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value); 205 intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle,
206 value & ~SENSE_VALID_FLAG);
205 } 207 }
206 208
207 return 0; 209 return 0;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1906840c1113..bb233a9cbad2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -53,6 +53,14 @@ if SPI_MASTER
53 53
54comment "SPI Master Controller Drivers" 54comment "SPI Master Controller Drivers"
55 55
56config SPI_ATH79
57 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
58 depends on ATH79 && GENERIC_GPIO
59 select SPI_BITBANG
60 help
61 This enables support for the SPI controller present on the
62 Atheros AR71XX/AR724X/AR913X SoCs.
63
56config SPI_ATMEL 64config SPI_ATMEL
57 tristate "Atmel SPI Controller" 65 tristate "Atmel SPI Controller"
58 depends on (ARCH_AT91 || AVR32) 66 depends on (ARCH_AT91 || AVR32)
@@ -156,10 +164,10 @@ config SPI_IMX_VER_0_4
156 def_bool y if ARCH_MX31 164 def_bool y if ARCH_MX31
157 165
158config SPI_IMX_VER_0_7 166config SPI_IMX_VER_0_7
159 def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 167 def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53
160 168
161config SPI_IMX_VER_2_3 169config SPI_IMX_VER_2_3
162 def_bool y if ARCH_MX51 170 def_bool y if ARCH_MX51 || ARCH_MX53
163 171
164config SPI_IMX 172config SPI_IMX
165 tristate "Freescale i.MX SPI controllers" 173 tristate "Freescale i.MX SPI controllers"
@@ -310,8 +318,8 @@ config SPI_S3C24XX_GPIO
310 318
311config SPI_S3C64XX 319config SPI_S3C64XX
312 tristate "Samsung S3C64XX series type SPI" 320 tristate "Samsung S3C64XX series type SPI"
313 depends on ARCH_S3C64XX && EXPERIMENTAL 321 depends on (ARCH_S3C64XX || ARCH_S5P64X0)
314 select S3C64XX_DMA 322 select S3C64XX_DMA if ARCH_S3C64XX
315 help 323 help
316 SPI driver for Samsung S3C64XX and newer SoCs. 324 SPI driver for Samsung S3C64XX and newer SoCs.
317 325
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 3a42463c92a4..86d1b5f9bbd9 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SPI_MASTER) += spi.o
10 10
11# SPI master controller drivers (bus) 11# SPI master controller drivers (bus)
12obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o 12obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
13obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
13obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o 14obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
14obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o 15obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
15obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 16obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index a2a5921c730a..71a1219a995d 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1795,7 +1795,7 @@ static int pl022_setup(struct spi_device *spi)
1795{ 1795{
1796 struct pl022_config_chip const *chip_info; 1796 struct pl022_config_chip const *chip_info;
1797 struct chip_data *chip; 1797 struct chip_data *chip;
1798 struct ssp_clock_params clk_freq; 1798 struct ssp_clock_params clk_freq = {0, };
1799 int status = 0; 1799 int status = 0;
1800 struct pl022 *pl022 = spi_master_get_devdata(spi->master); 1800 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1801 unsigned int bits = spi->bits_per_word; 1801 unsigned int bits = spi->bits_per_word;
diff --git a/drivers/spi/ath79_spi.c b/drivers/spi/ath79_spi.c
new file mode 100644
index 000000000000..fcff810ea3b0
--- /dev/null
+++ b/drivers/spi/ath79_spi.c
@@ -0,0 +1,292 @@
1/*
2 * SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs
3 *
4 * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
5 *
6 * This driver has been based on the spi-gpio.c:
7 * Copyright (C) 2006,2008 David Brownell
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/spinlock.h>
19#include <linux/workqueue.h>
20#include <linux/platform_device.h>
21#include <linux/io.h>
22#include <linux/spi/spi.h>
23#include <linux/spi/spi_bitbang.h>
24#include <linux/bitops.h>
25#include <linux/gpio.h>
26
27#include <asm/mach-ath79/ar71xx_regs.h>
28#include <asm/mach-ath79/ath79_spi_platform.h>
29
30#define DRV_NAME "ath79-spi"
31
32struct ath79_spi {
33 struct spi_bitbang bitbang;
34 u32 ioc_base;
35 u32 reg_ctrl;
36 void __iomem *base;
37};
38
39static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg)
40{
41 return ioread32(sp->base + reg);
42}
43
44static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned reg, u32 val)
45{
46 iowrite32(val, sp->base + reg);
47}
48
49static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
50{
51 return spi_master_get_devdata(spi->master);
52}
53
54static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
55{
56 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
57 int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
58
59 if (is_active) {
60 /* set initial clock polarity */
61 if (spi->mode & SPI_CPOL)
62 sp->ioc_base |= AR71XX_SPI_IOC_CLK;
63 else
64 sp->ioc_base &= ~AR71XX_SPI_IOC_CLK;
65
66 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
67 }
68
69 if (spi->chip_select) {
70 struct ath79_spi_controller_data *cdata = spi->controller_data;
71
72 /* SPI is normally active-low */
73 gpio_set_value(cdata->gpio, cs_high);
74 } else {
75 if (cs_high)
76 sp->ioc_base |= AR71XX_SPI_IOC_CS0;
77 else
78 sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
79
80 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
81 }
82
83}
84
85static int ath79_spi_setup_cs(struct spi_device *spi)
86{
87 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
88 struct ath79_spi_controller_data *cdata;
89
90 cdata = spi->controller_data;
91 if (spi->chip_select && !cdata)
92 return -EINVAL;
93
94 /* enable GPIO mode */
95 ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
96
97 /* save CTRL register */
98 sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
99 sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
100
101 /* TODO: setup speed? */
102 ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
103
104 if (spi->chip_select) {
105 int status = 0;
106
107 status = gpio_request(cdata->gpio, dev_name(&spi->dev));
108 if (status)
109 return status;
110
111 status = gpio_direction_output(cdata->gpio,
112 spi->mode & SPI_CS_HIGH);
113 if (status) {
114 gpio_free(cdata->gpio);
115 return status;
116 }
117 } else {
118 if (spi->mode & SPI_CS_HIGH)
119 sp->ioc_base |= AR71XX_SPI_IOC_CS0;
120 else
121 sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
122 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
123 }
124
125 return 0;
126}
127
128static void ath79_spi_cleanup_cs(struct spi_device *spi)
129{
130 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
131
132 if (spi->chip_select) {
133 struct ath79_spi_controller_data *cdata = spi->controller_data;
134 gpio_free(cdata->gpio);
135 }
136
137 /* restore CTRL register */
138 ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
139 /* disable GPIO mode */
140 ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
141}
142
143static int ath79_spi_setup(struct spi_device *spi)
144{
145 int status = 0;
146
147 if (spi->bits_per_word > 32)
148 return -EINVAL;
149
150 if (!spi->controller_state) {
151 status = ath79_spi_setup_cs(spi);
152 if (status)
153 return status;
154 }
155
156 status = spi_bitbang_setup(spi);
157 if (status && !spi->controller_state)
158 ath79_spi_cleanup_cs(spi);
159
160 return status;
161}
162
163static void ath79_spi_cleanup(struct spi_device *spi)
164{
165 ath79_spi_cleanup_cs(spi);
166 spi_bitbang_cleanup(spi);
167}
168
169static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
170 u32 word, u8 bits)
171{
172 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
173 u32 ioc = sp->ioc_base;
174
175 /* clock starts at inactive polarity */
176 for (word <<= (32 - bits); likely(bits); bits--) {
177 u32 out;
178
179 if (word & (1 << 31))
180 out = ioc | AR71XX_SPI_IOC_DO;
181 else
182 out = ioc & ~AR71XX_SPI_IOC_DO;
183
184 /* setup MSB (to slave) on trailing edge */
185 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
186 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
187
188 word <<= 1;
189 }
190
191 return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
192}
193
194static __devinit int ath79_spi_probe(struct platform_device *pdev)
195{
196 struct spi_master *master;
197 struct ath79_spi *sp;
198 struct ath79_spi_platform_data *pdata;
199 struct resource *r;
200 int ret;
201
202 master = spi_alloc_master(&pdev->dev, sizeof(*sp));
203 if (master == NULL) {
204 dev_err(&pdev->dev, "failed to allocate spi master\n");
205 return -ENOMEM;
206 }
207
208 sp = spi_master_get_devdata(master);
209 platform_set_drvdata(pdev, sp);
210
211 pdata = pdev->dev.platform_data;
212
213 master->setup = ath79_spi_setup;
214 master->cleanup = ath79_spi_cleanup;
215 if (pdata) {
216 master->bus_num = pdata->bus_num;
217 master->num_chipselect = pdata->num_chipselect;
218 } else {
219 master->bus_num = -1;
220 master->num_chipselect = 1;
221 }
222
223 sp->bitbang.master = spi_master_get(master);
224 sp->bitbang.chipselect = ath79_spi_chipselect;
225 sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
226 sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
227 sp->bitbang.flags = SPI_CS_HIGH;
228
229 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
230 if (r == NULL) {
231 ret = -ENOENT;
232 goto err_put_master;
233 }
234
235 sp->base = ioremap(r->start, r->end - r->start + 1);
236 if (!sp->base) {
237 ret = -ENXIO;
238 goto err_put_master;
239 }
240
241 ret = spi_bitbang_start(&sp->bitbang);
242 if (ret)
243 goto err_unmap;
244
245 return 0;
246
247err_unmap:
248 iounmap(sp->base);
249err_put_master:
250 platform_set_drvdata(pdev, NULL);
251 spi_master_put(sp->bitbang.master);
252
253 return ret;
254}
255
256static __devexit int ath79_spi_remove(struct platform_device *pdev)
257{
258 struct ath79_spi *sp = platform_get_drvdata(pdev);
259
260 spi_bitbang_stop(&sp->bitbang);
261 iounmap(sp->base);
262 platform_set_drvdata(pdev, NULL);
263 spi_master_put(sp->bitbang.master);
264
265 return 0;
266}
267
268static struct platform_driver ath79_spi_driver = {
269 .probe = ath79_spi_probe,
270 .remove = __devexit_p(ath79_spi_remove),
271 .driver = {
272 .name = DRV_NAME,
273 .owner = THIS_MODULE,
274 },
275};
276
277static __init int ath79_spi_init(void)
278{
279 return platform_driver_register(&ath79_spi_driver);
280}
281module_init(ath79_spi_init);
282
283static __exit void ath79_spi_exit(void)
284{
285 platform_driver_unregister(&ath79_spi_driver);
286}
287module_exit(ath79_spi_exit);
288
289MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X");
290MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
291MODULE_LICENSE("GPL v2");
292MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
index db35bd9c1b24..2fa012c109bc 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/dw_spi_mmio.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/clk.h> 11#include <linux/clk.h>
12#include <linux/err.h>
12#include <linux/interrupt.h> 13#include <linux/interrupt.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
@@ -68,8 +69,8 @@ static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
68 } 69 }
69 70
70 dwsmmio->clk = clk_get(&pdev->dev, NULL); 71 dwsmmio->clk = clk_get(&pdev->dev, NULL);
71 if (!dwsmmio->clk) { 72 if (IS_ERR(dwsmmio->clk)) {
72 ret = -ENODEV; 73 ret = PTR_ERR(dwsmmio->clk);
73 goto err_irq; 74 goto err_irq;
74 } 75 }
75 clk_enable(dwsmmio->clk); 76 clk_enable(dwsmmio->clk);
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 9469564e6888..1cf9d5faabf4 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -743,6 +743,12 @@ static struct platform_device_id spi_imx_devtype[] = {
743 .name = "imx51-ecspi", 743 .name = "imx51-ecspi",
744 .driver_data = SPI_IMX_VER_2_3, 744 .driver_data = SPI_IMX_VER_2_3,
745 }, { 745 }, {
746 .name = "imx53-cspi",
747 .driver_data = SPI_IMX_VER_0_7,
748 }, {
749 .name = "imx53-ecspi",
750 .driver_data = SPI_IMX_VER_2_3,
751 }, {
746 /* sentinel */ 752 /* sentinel */
747 } 753 }
748}; 754};
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index d93b66743ba7..2c665fceaac7 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -509,9 +509,11 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
509 bytes_done = 0; 509 bytes_done = 0;
510 510
511 while (bytes_done < t->len) { 511 while (bytes_done < t->len) {
512 void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL;
513 const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL;
512 n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, 514 n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
513 t->tx_buf + bytes_done, 515 tx_buf,
514 t->rx_buf + bytes_done, 516 rx_buf,
515 words, bits); 517 words, bits);
516 if (n < 0) 518 if (n < 0)
517 break; 519 break;
@@ -635,7 +637,7 @@ static int sh_msiof_spi_remove(struct platform_device *pdev)
635 ret = spi_bitbang_stop(&p->bitbang); 637 ret = spi_bitbang_stop(&p->bitbang);
636 if (!ret) { 638 if (!ret) {
637 pm_runtime_disable(&pdev->dev); 639 pm_runtime_disable(&pdev->dev);
638 free_irq(platform_get_irq(pdev, 0), sh_msiof_spi_irq); 640 free_irq(platform_get_irq(pdev, 0), p);
639 iounmap(p->mapbase); 641 iounmap(p->mapbase);
640 clk_put(p->clk); 642 clk_put(p->clk);
641 spi_master_put(p->bitbang.master); 643 spi_master_put(p->bitbang.master);
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
index bb7df02a5472..891e5909038c 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi_tegra.c
@@ -513,7 +513,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
513 } 513 }
514 514
515 tspi->clk = clk_get(&pdev->dev, NULL); 515 tspi->clk = clk_get(&pdev->dev, NULL);
516 if (IS_ERR_OR_NULL(tspi->clk)) { 516 if (IS_ERR(tspi->clk)) {
517 dev_err(&pdev->dev, "can not get clock\n"); 517 dev_err(&pdev->dev, "can not get clock\n");
518 ret = PTR_ERR(tspi->clk); 518 ret = PTR_ERR(tspi->clk);
519 goto err2; 519 goto err2;
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 2d8cc455dbc7..42cdaa9a4d8a 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -82,7 +82,7 @@ config SSB_SDIOHOST
82 82
83config SSB_SILENT 83config SSB_SILENT
84 bool "No SSB kernel messages" 84 bool "No SSB kernel messages"
85 depends on SSB && EMBEDDED 85 depends on SSB && EXPERT
86 help 86 help
87 This option turns off all Sonics Silicon Backplane printks. 87 This option turns off all Sonics Silicon Backplane printks.
88 Note that you won't be able to identify problems, once 88 Note that you won't be able to identify problems, once
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index c7345dbf43fa..f8533795ee7f 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -733,7 +733,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
733 733
734 /* Fetch the vendor specific tuples. */ 734 /* Fetch the vendor specific tuples. */
735 res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS, 735 res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
736 ssb_pcmcia_do_get_invariants, sprom); 736 ssb_pcmcia_do_get_invariants, iv);
737 if ((res == 0) || (res == -ENOSPC)) 737 if ((res == 0) || (res == -ENOSPC))
738 return 0; 738 return 0;
739 739
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 5a0985d4ce15..29884c00c4d5 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -420,6 +420,16 @@ int ssb_bus_scan(struct ssb_bus *bus,
420 bus->pcicore.dev = dev; 420 bus->pcicore.dev = dev;
421#endif /* CONFIG_SSB_DRIVER_PCICORE */ 421#endif /* CONFIG_SSB_DRIVER_PCICORE */
422 break; 422 break;
423 case SSB_DEV_ETHERNET:
424 if (bus->bustype == SSB_BUSTYPE_PCI) {
425 if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
426 (bus->host_pci->device & 0xFF00) == 0x4300) {
427 /* This is a dangling ethernet core on a
428 * wireless device. Ignore it. */
429 continue;
430 }
431 }
432 break;
423 default: 433 default:
424 break; 434 break;
425 } 435 }
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
index 0e298dba9fc8..29b8ab44ea47 100644
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
@@ -360,8 +360,8 @@ int PSSendOps(void *arg)
360 status = 1; 360 status = 1;
361 goto complete; 361 goto complete;
362 } 362 }
363 len = (firmware->size > MAX_BDADDR_FORMAT_LENGTH)? MAX_BDADDR_FORMAT_LENGTH: firmware->size; 363 len = min(firmware->size, MAX_BDADDR_FORMAT_LENGTH - 1);
364 memcpy(config_bdaddr, firmware->data,len); 364 memcpy(config_bdaddr, firmware->data, len);
365 config_bdaddr[len] = '\0'; 365 config_bdaddr[len] = '\0';
366 write_bdaddr(hdev,config_bdaddr,BDADDR_TYPE_STRING); 366 write_bdaddr(hdev,config_bdaddr,BDADDR_TYPE_STRING);
367 A_RELEASE_FIRMWARE(firmware); 367 A_RELEASE_FIRMWARE(firmware);
diff --git a/drivers/staging/autofs/dirhash.c b/drivers/staging/autofs/dirhash.c
index d3f42c8325f7..a08bd7355035 100644
--- a/drivers/staging/autofs/dirhash.c
+++ b/drivers/staging/autofs/dirhash.c
@@ -88,14 +88,13 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb,
88 } 88 }
89 path.mnt = mnt; 89 path.mnt = mnt;
90 path_get(&path); 90 path_get(&path);
91 if (!follow_down(&path)) { 91 if (!follow_down_one(&path)) {
92 path_put(&path); 92 path_put(&path);
93 DPRINTK(("autofs: not expirable\ 93 DPRINTK(("autofs: not expirable\
94 (not a mounted directory): %s\n", ent->name)); 94 (not a mounted directory): %s\n", ent->name));
95 continue; 95 continue;
96 } 96 }
97 while (d_mountpoint(path.dentry) && follow_down(&path)) 97 follow_down(&path, false); // TODO: need to check error
98 ;
99 umount_ok = may_umount(path.mnt); 98 umount_ok = may_umount(path.mnt);
100 path_put(&path); 99 path_put(&path);
101 100
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 8ce4536e6e28..feade9451b2e 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -359,12 +359,11 @@ static VOID PruneQueue(PMINI_ADAPTER Adapter, INT iIndex)
359 359
360 if(PacketToDrop) 360 if(PacketToDrop)
361 { 361 {
362 struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, iIndex);
363 if (netif_msg_tx_err(Adapter)) 362 if (netif_msg_tx_err(Adapter))
364 pr_info(PFX "%s: tx queue %d overlimit\n", 363 pr_info(PFX "%s: tx queue %d overlimit\n",
365 Adapter->dev->name, iIndex); 364 Adapter->dev->name, iIndex);
366 365
367 txq->tx_dropped++; 366 netstats->tx_dropped++;
368 367
369 DEQUEUEPACKET(Adapter->PackInfo[iIndex].FirstTxQueue, 368 DEQUEUEPACKET(Adapter->PackInfo[iIndex].FirstTxQueue,
370 Adapter->PackInfo[iIndex].LastTxQueue); 369 Adapter->PackInfo[iIndex].LastTxQueue);
@@ -404,7 +403,7 @@ VOID flush_all_queues(PMINI_ADAPTER Adapter)
404// down(&Adapter->data_packet_queue_lock); 403// down(&Adapter->data_packet_queue_lock);
405 for(iQIndex=LowPriority; iQIndex<HiPriority; iQIndex++) 404 for(iQIndex=LowPriority; iQIndex<HiPriority; iQIndex++)
406 { 405 {
407 struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, iQIndex); 406 struct net_device_stats *netstats = &Adapter->dev->stats;
408 407
409 spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock); 408 spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
410 while(Adapter->PackInfo[iQIndex].FirstTxQueue) 409 while(Adapter->PackInfo[iQIndex].FirstTxQueue)
@@ -413,7 +412,7 @@ VOID flush_all_queues(PMINI_ADAPTER Adapter)
413 if(PacketToDrop) 412 if(PacketToDrop)
414 { 413 {
415 uiTotalPacketLength = PacketToDrop->len; 414 uiTotalPacketLength = PacketToDrop->len;
416 txq->tx_dropped++; 415 netstats->tx_dropped++;
417 } 416 }
418 else 417 else
419 uiTotalPacketLength = 0; 418 uiTotalPacketLength = 0;
diff --git a/drivers/staging/bcm/Transmit.c b/drivers/staging/bcm/Transmit.c
index 0f7000960d50..d5e4a7404f71 100644
--- a/drivers/staging/bcm/Transmit.c
+++ b/drivers/staging/bcm/Transmit.c
@@ -157,11 +157,11 @@ INT SetupNextSend(PMINI_ADAPTER Adapter, struct sk_buff *Packet, USHORT Vcid)
157 } 157 }
158 else 158 else
159 { 159 {
160 struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, QueueIndex); 160 struct net_device_stats *netstats = &Adapter->dev->stats;
161 Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength; 161 Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength;
162 162
163 txq->tx_bytes += Leader.PLength; 163 netstats->tx_bytes += Leader.PLength;
164 ++txq->tx_packets; 164 ++netstats->tx_packets;
165 165
166 Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3; 166 Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3;
167 Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len); 167 Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len);
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index bdd629d72a75..cd8392badff0 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -209,11 +209,8 @@ static void wl_ops_stop(struct ieee80211_hw *hw)
209 struct wl_info *wl = hw->priv; 209 struct wl_info *wl = hw->priv;
210 ASSERT(wl); 210 ASSERT(wl);
211 WL_LOCK(wl); 211 WL_LOCK(wl);
212 wl_down(wl);
213 ieee80211_stop_queues(hw); 212 ieee80211_stop_queues(hw);
214 WL_UNLOCK(wl); 213 WL_UNLOCK(wl);
215
216 return;
217} 214}
218 215
219static int 216static int
@@ -246,7 +243,14 @@ wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
246static void 243static void
247wl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 244wl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
248{ 245{
249 return; 246 struct wl_info *wl;
247
248 wl = HW_TO_WL(hw);
249
250 /* put driver in down state */
251 WL_LOCK(wl);
252 wl_down(wl);
253 WL_UNLOCK(wl);
250} 254}
251 255
252static int 256static int
@@ -259,9 +263,7 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
259 switch (type) { 263 switch (type) {
260 case NL80211_CHAN_HT20: 264 case NL80211_CHAN_HT20:
261 case NL80211_CHAN_NO_HT: 265 case NL80211_CHAN_NO_HT:
262 WL_LOCK(wl);
263 err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value); 266 err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
264 WL_UNLOCK(wl);
265 break; 267 break;
266 case NL80211_CHAN_HT40MINUS: 268 case NL80211_CHAN_HT40MINUS:
267 case NL80211_CHAN_HT40PLUS: 269 case NL80211_CHAN_HT40PLUS:
@@ -281,6 +283,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
281 int err = 0; 283 int err = 0;
282 int new_int; 284 int new_int;
283 285
286 WL_LOCK(wl);
284 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { 287 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
285 WL_NONE("%s: Setting listen interval to %d\n", 288 WL_NONE("%s: Setting listen interval to %d\n",
286 __func__, conf->listen_interval); 289 __func__, conf->listen_interval);
@@ -337,6 +340,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
337 } 340 }
338 341
339 config_out: 342 config_out:
343 WL_UNLOCK(wl);
340 return err; 344 return err;
341} 345}
342 346
@@ -455,13 +459,21 @@ wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
455 459
456static void wl_ops_sw_scan_start(struct ieee80211_hw *hw) 460static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
457{ 461{
462 struct wl_info *wl = hw->priv;
458 WL_NONE("Scan Start\n"); 463 WL_NONE("Scan Start\n");
464 WL_LOCK(wl);
465 wlc_scan_start(wl->wlc);
466 WL_UNLOCK(wl);
459 return; 467 return;
460} 468}
461 469
462static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw) 470static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
463{ 471{
472 struct wl_info *wl = hw->priv;
464 WL_NONE("Scan Complete\n"); 473 WL_NONE("Scan Complete\n");
474 WL_LOCK(wl);
475 wlc_scan_stop(wl->wlc);
476 WL_UNLOCK(wl);
465 return; 477 return;
466} 478}
467 479
@@ -779,7 +791,7 @@ static struct wl_info *wl_attach(u16 vendor, u16 device, unsigned long regs,
779 wl_found++; 791 wl_found++;
780 return wl; 792 return wl;
781 793
782 fail: 794fail:
783 wl_free(wl); 795 wl_free(wl);
784fail1: 796fail1:
785 return NULL; 797 return NULL;
@@ -1090,7 +1102,6 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1090 return 0; 1102 return 0;
1091} 1103}
1092 1104
1093#ifdef LINUXSTA_PS
1094static int wl_suspend(struct pci_dev *pdev, pm_message_t state) 1105static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
1095{ 1106{
1096 struct wl_info *wl; 1107 struct wl_info *wl;
@@ -1105,11 +1116,12 @@ static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
1105 return -ENODEV; 1116 return -ENODEV;
1106 } 1117 }
1107 1118
1119 /* only need to flag hw is down for proper resume */
1108 WL_LOCK(wl); 1120 WL_LOCK(wl);
1109 wl_down(wl);
1110 wl->pub->hw_up = false; 1121 wl->pub->hw_up = false;
1111 WL_UNLOCK(wl); 1122 WL_UNLOCK(wl);
1112 pci_save_state(pdev, wl->pci_psstate); 1123
1124 pci_save_state(pdev);
1113 pci_disable_device(pdev); 1125 pci_disable_device(pdev);
1114 return pci_set_power_state(pdev, PCI_D3hot); 1126 return pci_set_power_state(pdev, PCI_D3hot);
1115} 1127}
@@ -1133,7 +1145,7 @@ static int wl_resume(struct pci_dev *pdev)
1133 if (err) 1145 if (err)
1134 return err; 1146 return err;
1135 1147
1136 pci_restore_state(pdev, wl->pci_psstate); 1148 pci_restore_state(pdev);
1137 1149
1138 err = pci_enable_device(pdev); 1150 err = pci_enable_device(pdev);
1139 if (err) 1151 if (err)
@@ -1145,13 +1157,12 @@ static int wl_resume(struct pci_dev *pdev)
1145 if ((val & 0x0000ff00) != 0) 1157 if ((val & 0x0000ff00) != 0)
1146 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 1158 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1147 1159
1148 WL_LOCK(wl); 1160 /*
1149 err = wl_up(wl); 1161 * done. driver will be put in up state
1150 WL_UNLOCK(wl); 1162 * in wl_ops_add_interface() call.
1151 1163 */
1152 return err; 1164 return err;
1153} 1165}
1154#endif /* LINUXSTA_PS */
1155 1166
1156static void wl_remove(struct pci_dev *pdev) 1167static void wl_remove(struct pci_dev *pdev)
1157{ 1168{
@@ -1184,14 +1195,12 @@ static void wl_remove(struct pci_dev *pdev)
1184} 1195}
1185 1196
1186static struct pci_driver wl_pci_driver = { 1197static struct pci_driver wl_pci_driver = {
1187 .name = "brcm80211", 1198 .name = "brcm80211",
1188 .probe = wl_pci_probe, 1199 .probe = wl_pci_probe,
1189#ifdef LINUXSTA_PS 1200 .suspend = wl_suspend,
1190 .suspend = wl_suspend, 1201 .resume = wl_resume,
1191 .resume = wl_resume, 1202 .remove = __devexit_p(wl_remove),
1192#endif /* LINUXSTA_PS */ 1203 .id_table = wl_id_table,
1193 .remove = __devexit_p(wl_remove),
1194 .id_table = wl_id_table,
1195}; 1204};
1196 1205
1197/** 1206/**
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index 1d5d01ac0a9b..e37e8058e2b8 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -5126,7 +5126,6 @@ wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
5126 fifo = prio2fifo[prio]; 5126 fifo = prio2fifo[prio];
5127 5127
5128 ASSERT((uint) skb_headroom(sdu) >= TXOFF); 5128 ASSERT((uint) skb_headroom(sdu) >= TXOFF);
5129 ASSERT(!(sdu->cloned));
5130 ASSERT(!(sdu->next)); 5129 ASSERT(!(sdu->next));
5131 ASSERT(!(sdu->prev)); 5130 ASSERT(!(sdu->prev));
5132 ASSERT(fifo < NFIFO); 5131 ASSERT(fifo < NFIFO);
@@ -8462,3 +8461,16 @@ static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh,
8462 8461
8463 kfree(qi); 8462 kfree(qi);
8464} 8463}
8464
8465/*
8466 * Flag 'scan in progress' to withold dynamic phy calibration
8467 */
8468void wlc_scan_start(struct wlc_info *wlc)
8469{
8470 wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
8471}
8472
8473void wlc_scan_stop(struct wlc_info *wlc)
8474{
8475 wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
8476}
diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h
index 146a6904a39b..aff413001b70 100644
--- a/drivers/staging/brcm80211/sys/wlc_pub.h
+++ b/drivers/staging/brcm80211/sys/wlc_pub.h
@@ -570,6 +570,8 @@ extern void wlc_enable_mac(struct wlc_info *wlc);
570extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate); 570extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
571extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg); 571extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg);
572extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg); 572extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg);
573extern void wlc_scan_start(struct wlc_info *wlc);
574extern void wlc_scan_stop(struct wlc_info *wlc);
573 575
574static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name, 576static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name,
575 uint *arg) 577 uint *arg)
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index aad47326d6dc..1502d80f6f78 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -439,6 +439,7 @@ config COMEDI_NI_AT_AO
439config COMEDI_NI_ATMIO 439config COMEDI_NI_ATMIO
440 tristate "NI AT-MIO E series ISA-PNP card support" 440 tristate "NI AT-MIO E series ISA-PNP card support"
441 depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON 441 depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON
442 select COMEDI_8255
442 default N 443 default N
443 ---help--- 444 ---help---
444 Enable support for National Instruments AT-MIO E series cards 445 Enable support for National Instruments AT-MIO E series cards
@@ -1040,6 +1041,8 @@ config COMEDI_NI_PCIDIO
1040config COMEDI_NI_PCIMIO 1041config COMEDI_NI_PCIMIO
1041 tristate "NI PCI-MIO-E series and M series support" 1042 tristate "NI PCI-MIO-E series and M series support"
1042 depends on COMEDI_NI_TIO && COMEDI_NI_COMMON 1043 depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
1044 select COMEDI_8255
1045 select COMEDI_FC
1043 default N 1046 default N
1044 ---help--- 1047 ---help---
1045 Enable support for National Instruments PCI-MIO-E series and M series 1048 Enable support for National Instruments PCI-MIO-E series and M series
@@ -1164,6 +1167,7 @@ config COMEDI_NI_LABPC_CS
1164config COMEDI_NI_MIO_CS 1167config COMEDI_NI_MIO_CS
1165 tristate "NI DAQCard E series PCMCIA support" 1168 tristate "NI DAQCard E series PCMCIA support"
1166 depends on COMEDI_NI_TIO && COMEDI_NI_COMMON 1169 depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
1170 select COMEDI_8255
1167 select COMEDI_FC 1171 select COMEDI_FC
1168 default N 1172 default N
1169 ---help--- 1173 ---help---
@@ -1268,7 +1272,6 @@ config COMEDI_MITE
1268config COMEDI_NI_TIO 1272config COMEDI_NI_TIO
1269 tristate "NI general purpose counter support" 1273 tristate "NI general purpose counter support"
1270 depends on COMEDI_MITE 1274 depends on COMEDI_MITE
1271 select COMEDI_8255
1272 default N 1275 default N
1273 ---help--- 1276 ---help---
1274 Enable support for National Instruments general purpose counters. 1277 Enable support for National Instruments general purpose counters.
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index cd25b241cc1f..fd274e9c7b78 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -61,8 +61,6 @@
61#define PCI_DAQ_SIZE 4096 61#define PCI_DAQ_SIZE 4096
62#define PCI_DAQ_SIZE_660X 8192 62#define PCI_DAQ_SIZE_660X 8192
63 63
64MODULE_LICENSE("GPL");
65
66struct mite_struct *mite_devices; 64struct mite_struct *mite_devices;
67EXPORT_SYMBOL(mite_devices); 65EXPORT_SYMBOL(mite_devices);
68 66
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 14e716e99a5c..54741c9e1af5 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -527,3 +527,7 @@ static void __exit driver_ni6527_cleanup_module(void)
527 527
528module_init(driver_ni6527_init_module); 528module_init(driver_ni6527_init_module);
529module_exit(driver_ni6527_cleanup_module); 529module_exit(driver_ni6527_cleanup_module);
530
531MODULE_AUTHOR("Comedi http://www.comedi.org");
532MODULE_DESCRIPTION("Comedi low-level driver");
533MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 8b8e2aaf77fb..403fc0997d37 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -871,3 +871,7 @@ static void __exit driver_ni_65xx_cleanup_module(void)
871 871
872module_init(driver_ni_65xx_init_module); 872module_init(driver_ni_65xx_init_module);
873module_exit(driver_ni_65xx_cleanup_module); 873module_exit(driver_ni_65xx_cleanup_module);
874
875MODULE_AUTHOR("Comedi http://www.comedi.org");
876MODULE_DESCRIPTION("Comedi low-level driver");
877MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 6612b085c4ef..ca2aeaa9449c 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1421,3 +1421,7 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
1421 }; 1421 };
1422 return 0; 1422 return 0;
1423} 1423}
1424
1425MODULE_AUTHOR("Comedi http://www.comedi.org");
1426MODULE_DESCRIPTION("Comedi low-level driver");
1427MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e9f034efdc6f..d8d91f90060e 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -384,3 +384,7 @@ static int ni_670x_find_device(struct comedi_device *dev, int bus, int slot)
384 mite_list_devices(); 384 mite_list_devices();
385 return -EIO; 385 return -EIO;
386} 386}
387
388MODULE_AUTHOR("Comedi http://www.comedi.org");
389MODULE_DESCRIPTION("Comedi low-level driver");
390MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 4d1868d04bac..0728c3c0cb0e 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -575,7 +575,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
575 /* grab our IRQ */ 575 /* grab our IRQ */
576 if (irq) { 576 if (irq) {
577 isr_flags = 0; 577 isr_flags = 0;
578 if (thisboard->bustype == pci_bustype) 578 if (thisboard->bustype == pci_bustype
579 || thisboard->bustype == pcmcia_bustype)
579 isr_flags |= IRQF_SHARED; 580 isr_flags |= IRQF_SHARED;
580 if (request_irq(irq, labpc_interrupt, isr_flags, 581 if (request_irq(irq, labpc_interrupt, isr_flags,
581 driver_labpc.driver_name, dev)) { 582 driver_labpc.driver_name, dev)) {
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 84a15c34e484..005d2fe86ee4 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1354,3 +1354,7 @@ static void __exit driver_pcidio_cleanup_module(void)
1354 1354
1355module_init(driver_pcidio_init_module); 1355module_init(driver_pcidio_init_module);
1356module_exit(driver_pcidio_cleanup_module); 1356module_exit(driver_pcidio_cleanup_module);
1357
1358MODULE_AUTHOR("Comedi http://www.comedi.org");
1359MODULE_DESCRIPTION("Comedi low-level driver");
1360MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 23a381247285..9148abdad074 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1853,3 +1853,7 @@ static int pcimio_dio_change(struct comedi_device *dev,
1853 1853
1854 return 0; 1854 return 0;
1855} 1855}
1856
1857MODULE_AUTHOR("Comedi http://www.comedi.org");
1858MODULE_DESCRIPTION("Comedi low-level driver");
1859MODULE_LICENSE("GPL");
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index b3d05fcfe6d2..4fb809485d9e 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -368,6 +368,7 @@ static int blkvsc_probe(struct device *device)
368 blkdev->gd->first_minor = 0; 368 blkdev->gd->first_minor = 0;
369 blkdev->gd->fops = &block_ops; 369 blkdev->gd->fops = &block_ops;
370 blkdev->gd->private_data = blkdev; 370 blkdev->gd->private_data = blkdev;
371 blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
371 sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum); 372 sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
372 373
373 blkvsc_do_inquiry(blkdev); 374 blkvsc_do_inquiry(blkdev);
diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
index df9cd131e953..0edbe7483a4c 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -1279,7 +1279,7 @@ static void netvsc_channel_cb(void *context)
1279 /* ASSERT(device); */ 1279 /* ASSERT(device); */
1280 1280
1281 packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char), 1281 packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
1282 GFP_KERNEL); 1282 GFP_ATOMIC);
1283 if (!packet) 1283 if (!packet)
1284 return; 1284 return;
1285 buffer = packet; 1285 buffer = packet;
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 0147b407512c..b41c9640b72d 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -236,6 +236,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
236 if (status == 1) { 236 if (status == 1) {
237 netif_carrier_on(net); 237 netif_carrier_on(net);
238 netif_wake_queue(net); 238 netif_wake_queue(net);
239 netif_notify_peers(net);
239 } else { 240 } else {
240 netif_carrier_off(net); 241 netif_carrier_off(net);
241 netif_stop_queue(net); 242 netif_stop_queue(net);
@@ -358,7 +359,6 @@ static int netvsc_probe(struct device *device)
358 359
359 /* Set initial state */ 360 /* Set initial state */
360 netif_carrier_off(net); 361 netif_carrier_off(net);
361 netif_stop_queue(net);
362 362
363 net_device_ctx = netdev_priv(net); 363 net_device_ctx = netdev_priv(net);
364 net_device_ctx->device_ctx = device_ctx; 364 net_device_ctx->device_ctx = device_ctx;
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index deb68c8a6e18..b8b54da67c63 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -68,7 +68,7 @@ static ssize_t ad7476_show_scale(struct device *dev,
68 /* Corresponds to Vref / 2^(bits) */ 68 /* Corresponds to Vref / 2^(bits) */
69 unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits; 69 unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
70 70
71 return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000); 71 return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
72} 72}
73static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7476_show_scale, NULL, 0); 73static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7476_show_scale, NULL, 0);
74 74
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index 685908995d49..5d85efab658c 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -68,7 +68,7 @@ static ssize_t ad7887_show_scale(struct device *dev,
68 /* Corresponds to Vref / 2^(bits) */ 68 /* Corresponds to Vref / 2^(bits) */
69 unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits; 69 unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
70 70
71 return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000); 71 return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
72} 72}
73static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7887_show_scale, NULL, 0); 73static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7887_show_scale, NULL, 0);
74 74
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 6309d521a864..89ccf375a188 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -432,7 +432,7 @@ static ssize_t ad799x_show_scale(struct device *dev,
432 /* Corresponds to Vref / 2^(bits) */ 432 /* Corresponds to Vref / 2^(bits) */
433 unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits; 433 unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
434 434
435 return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000); 435 return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
436} 436}
437 437
438static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad799x_show_scale, NULL, 0); 438static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad799x_show_scale, NULL, 0);
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index e3387cd31145..0f87ecac82fc 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -87,7 +87,7 @@ static ssize_t ad5446_show_scale(struct device *dev,
87 /* Corresponds to Vref / 2^(bits) */ 87 /* Corresponds to Vref / 2^(bits) */
88 unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits; 88 unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
89 89
90 return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000); 90 return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
91} 91}
92static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5446_show_scale, NULL, 0); 92static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
93 93
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
index e38e89df6e84..e2f6d6a3c850 100644
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ b/drivers/staging/intel_sst/intelmid_v2_control.c
@@ -874,7 +874,10 @@ static int nc_set_selected_input_dev(u8 value)
874 sc_access[3].reg_addr = 0x109; 874 sc_access[3].reg_addr = 0x109;
875 sc_access[3].mask = MASK6; 875 sc_access[3].mask = MASK6;
876 sc_access[3].value = 0x00; 876 sc_access[3].value = 0x00;
877 num_val = 4; 877 sc_access[4].reg_addr = 0x104;
878 sc_access[4].value = 0x3C;
879 sc_access[4].mask = 0xff;
880 num_val = 5;
878 break; 881 break;
879 default: 882 default:
880 return -EINVAL; 883 return -EINVAL;
diff --git a/drivers/staging/lirc/TODO.lirc_zilog b/drivers/staging/lirc/TODO.lirc_zilog
index 6aa312df4018..2d0263f07937 100644
--- a/drivers/staging/lirc/TODO.lirc_zilog
+++ b/drivers/staging/lirc/TODO.lirc_zilog
@@ -1,13 +1,37 @@
1The binding between hdpvr and lirc_zilog is currently disabled, 11. Both ir-kbd-i2c and lirc_zilog provide support for RX events.
2The 'tx_only' lirc_zilog module parameter will allow ir-kbd-i2c
3and lirc_zilog to coexist in the kernel, if the user requires such a set-up.
4However the IR unit will not work well without coordination between the
5two modules. A shared mutex, for transceiver access locking, needs to be
6supplied by bridge drivers, in struct IR_i2_init_data, to both ir-kbd-i2c
7and lirc_zilog, before they will coexist usefully. This should be fixed
8before moving out of staging.
9
102. References and locking need careful examination. For cx18 and ivtv PCI
11cards, which are not easily "hot unplugged", the imperfect state of reference
12counting and locking is acceptable if not correct. For USB connected units
13like HD PVR, PVR USB2, HVR-1900, and HVR1950, the likelyhood of an Ooops on
14unplug is probably great. Proper reference counting and locking needs to be
15implemented before this module is moved out of staging.
16
173. The binding between hdpvr and lirc_zilog is currently disabled,
2due to an OOPS reported a few years ago when both the hdpvr and cx18 18due to an OOPS reported a few years ago when both the hdpvr and cx18
3drivers were loaded in his system. More details can be seen at: 19drivers were loaded in his system. More details can be seen at:
4 http://www.mail-archive.com/linux-media@vger.kernel.org/msg09163.html 20 http://www.mail-archive.com/linux-media@vger.kernel.org/msg09163.html
5More tests need to be done, in order to fix the reported issue. 21More tests need to be done, in order to fix the reported issue.
6 22
7There's a conflict between ir-kbd-i2c: Both provide support for RX events. 234. In addition to providing a shared mutex for transceiver access
8Such conflict needs to be fixed, before moving it out of staging. 24locking, bridge drivers, if able, should provide a chip reset() callback
25to lirc_zilog via struct IR_i2c_init_data. cx18 and ivtv already have routines
26to perform Z8 chip resets via GPIO manipulations. This will allow lirc_zilog
27to bring the chip back to normal when it hangs, in the same places the
28original lirc_pvr150 driver code does. This is not strictly needed, so it
29is not required to move lirc_zilog out of staging.
30
315. Both lirc_zilog and ir-kbd-i2c support the Zilog Z8 for IR, as programmed
32and installed on Hauppauge products. When working on either module, developers
33must consider at least the following bridge drivers which mention an IR Rx unit
34at address 0x71 (indicative of a Z8):
9 35
10The way I2C probe works, it will try to register the driver twice, one 36 ivtv cx18 hdpvr pvrusb2 bt8xx cx88 saa7134
11for RX and another for TX. The logic needs to be fixed to avoid such
12issue.
13 37
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 0da6b9518af9..235cab0eb087 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -447,6 +447,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
447 447
448exit: 448exit:
449 mutex_unlock(&context->ctx_lock); 449 mutex_unlock(&context->ctx_lock);
450 kfree(data_buf);
450 451
451 return (!retval) ? n_bytes : retval; 452 return (!retval) ? n_bytes : retval;
452} 453}
diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c
index 929ae5795467..5938616f3e8f 100644
--- a/drivers/staging/lirc/lirc_it87.c
+++ b/drivers/staging/lirc/lirc_it87.c
@@ -232,6 +232,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
232 i++; 232 i++;
233 } 233 }
234 terminate_send(tx_buf[i - 1]); 234 terminate_send(tx_buf[i - 1]);
235 kfree(tx_buf);
235 return n; 236 return n;
236} 237}
237 238
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/lirc/lirc_parallel.c
index dfd2c447e67d..3a9c09881b2b 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/lirc/lirc_parallel.c
@@ -376,6 +376,7 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
376 unsigned long flags; 376 unsigned long flags;
377 int counttimer; 377 int counttimer;
378 int *wbuf; 378 int *wbuf;
379 ssize_t ret;
379 380
380 if (!is_claimed) 381 if (!is_claimed)
381 return -EBUSY; 382 return -EBUSY;
@@ -393,8 +394,10 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
393 if (timer == 0) { 394 if (timer == 0) {
394 /* try again if device is ready */ 395 /* try again if device is ready */
395 timer = init_lirc_timer(); 396 timer = init_lirc_timer();
396 if (timer == 0) 397 if (timer == 0) {
397 return -EIO; 398 ret = -EIO;
399 goto out;
400 }
398 } 401 }
399 402
400 /* adjust values from usecs */ 403 /* adjust values from usecs */
@@ -420,7 +423,8 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
420 if (check_pselecd && (in(1) & LP_PSELECD)) { 423 if (check_pselecd && (in(1) & LP_PSELECD)) {
421 lirc_off(); 424 lirc_off();
422 local_irq_restore(flags); 425 local_irq_restore(flags);
423 return -EIO; 426 ret = -EIO;
427 goto out;
424 } 428 }
425 } while (counttimer < wbuf[i]); 429 } while (counttimer < wbuf[i]);
426 i++; 430 i++;
@@ -436,7 +440,8 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
436 level = newlevel; 440 level = newlevel;
437 if (check_pselecd && (in(1) & LP_PSELECD)) { 441 if (check_pselecd && (in(1) & LP_PSELECD)) {
438 local_irq_restore(flags); 442 local_irq_restore(flags);
439 return -EIO; 443 ret = -EIO;
444 goto out;
440 } 445 }
441 } while (counttimer < wbuf[i]); 446 } while (counttimer < wbuf[i]);
442 i++; 447 i++;
@@ -445,7 +450,11 @@ static ssize_t lirc_write(struct file *filep, const char *buf, size_t n,
445#else 450#else
446 /* place code that handles write without external timer here */ 451 /* place code that handles write without external timer here */
447#endif 452#endif
448 return n; 453 ret = n;
454out:
455 kfree(wbuf);
456
457 return ret;
449} 458}
450 459
451static unsigned int lirc_poll(struct file *file, poll_table *wait) 460static unsigned int lirc_poll(struct file *file, poll_table *wait)
diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/lirc/lirc_sasem.c
index 998485ebdbce..925eabe14854 100644
--- a/drivers/staging/lirc/lirc_sasem.c
+++ b/drivers/staging/lirc/lirc_sasem.c
@@ -448,6 +448,7 @@ static ssize_t vfd_write(struct file *file, const char *buf,
448exit: 448exit:
449 449
450 mutex_unlock(&context->ctx_lock); 450 mutex_unlock(&context->ctx_lock);
451 kfree(data_buf);
451 452
452 return (!retval) ? n_bytes : retval; 453 return (!retval) ? n_bytes : retval;
453} 454}
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 9bcf149c4260..1c3099b388e0 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -966,7 +966,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
966 if (n % sizeof(int) || count % 2 == 0) 966 if (n % sizeof(int) || count % 2 == 0)
967 return -EINVAL; 967 return -EINVAL;
968 wbuf = memdup_user(buf, n); 968 wbuf = memdup_user(buf, n);
969 if (PTR_ERR(wbuf)) 969 if (IS_ERR(wbuf))
970 return PTR_ERR(wbuf); 970 return PTR_ERR(wbuf);
971 spin_lock_irqsave(&hardware[type].lock, flags); 971 spin_lock_irqsave(&hardware[type].lock, flags);
972 if (type == LIRC_IRDEO) { 972 if (type == LIRC_IRDEO) {
@@ -981,6 +981,7 @@ static ssize_t lirc_write(struct file *file, const char *buf,
981 } 981 }
982 off(); 982 off();
983 spin_unlock_irqrestore(&hardware[type].lock, flags); 983 spin_unlock_irqrestore(&hardware[type].lock, flags);
984 kfree(wbuf);
984 return n; 985 return n;
985} 986}
986 987
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index c553ab626238..76be7b8c6209 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -330,6 +330,7 @@ static ssize_t lirc_write(struct file *file, const char *buf, size_t n,
330 /* enable receiver */ 330 /* enable receiver */
331 Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE; 331 Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE;
332#endif 332#endif
333 kfree(tx_buf);
333 return count; 334 return count;
334} 335}
335 336
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index ad29bb1275ab..0aad0d7a74a3 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -20,6 +20,9 @@
20 * 20 *
21 * parts are cut&pasted from the lirc_i2c.c driver 21 * parts are cut&pasted from the lirc_i2c.c driver
22 * 22 *
23 * Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
24 * Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
25 *
23 * This program is free software; you can redistribute it and/or modify 26 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by 27 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or 28 * the Free Software Foundation; either version 2 of the License, or
@@ -60,38 +63,44 @@
60#include <media/lirc_dev.h> 63#include <media/lirc_dev.h>
61#include <media/lirc.h> 64#include <media/lirc.h>
62 65
63struct IR { 66struct IR_rx {
64 struct lirc_driver l;
65
66 /* Device info */
67 struct mutex ir_lock;
68 int open;
69 bool is_hdpvr;
70
71 /* RX device */ 67 /* RX device */
72 struct i2c_client c_rx; 68 struct i2c_client *c;
73 int have_rx;
74 69
75 /* RX device buffer & lock */ 70 /* RX device buffer & lock */
76 struct lirc_buffer buf; 71 struct lirc_buffer buf;
77 struct mutex buf_lock; 72 struct mutex buf_lock;
78 73
79 /* RX polling thread data */ 74 /* RX polling thread data */
80 struct completion *t_notify;
81 struct completion *t_notify2;
82 int shutdown;
83 struct task_struct *task; 75 struct task_struct *task;
84 76
85 /* RX read data */ 77 /* RX read data */
86 unsigned char b[3]; 78 unsigned char b[3];
79 bool hdpvr_data_fmt;
80};
87 81
82struct IR_tx {
88 /* TX device */ 83 /* TX device */
89 struct i2c_client c_tx; 84 struct i2c_client *c;
85
86 /* TX additional actions needed */
90 int need_boot; 87 int need_boot;
91 int have_tx; 88 bool post_tx_ready_poll;
89};
90
91struct IR {
92 struct lirc_driver l;
93
94 struct mutex ir_lock;
95 int open;
96
97 struct i2c_adapter *adapter;
98 struct IR_rx *rx;
99 struct IR_tx *tx;
92}; 100};
93 101
94/* Minor -> data mapping */ 102/* Minor -> data mapping */
103static struct mutex ir_devices_lock;
95static struct IR *ir_devices[MAX_IRCTL_DEVICES]; 104static struct IR *ir_devices[MAX_IRCTL_DEVICES];
96 105
97/* Block size for IR transmitter */ 106/* Block size for IR transmitter */
@@ -124,14 +133,11 @@ static struct mutex tx_data_lock;
124#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \ 133#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
125 ## args) 134 ## args)
126#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) 135#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
127 136#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
128#define ZILOG_HAUPPAUGE_IR_RX_NAME "Zilog/Hauppauge IR RX"
129#define ZILOG_HAUPPAUGE_IR_TX_NAME "Zilog/Hauppauge IR TX"
130 137
131/* module parameters */ 138/* module parameters */
132static int debug; /* debug output */ 139static int debug; /* debug output */
133static int disable_rx; /* disable RX device */ 140static int tx_only; /* only handle the IR Tx function */
134static int disable_tx; /* disable TX device */
135static int minor = -1; /* minor number */ 141static int minor = -1; /* minor number */
136 142
137#define dprintk(fmt, args...) \ 143#define dprintk(fmt, args...) \
@@ -150,8 +156,12 @@ static int add_to_buf(struct IR *ir)
150 int ret; 156 int ret;
151 int failures = 0; 157 int failures = 0;
152 unsigned char sendbuf[1] = { 0 }; 158 unsigned char sendbuf[1] = { 0 };
159 struct IR_rx *rx = ir->rx;
153 160
154 if (lirc_buffer_full(&ir->buf)) { 161 if (rx == NULL)
162 return -ENXIO;
163
164 if (lirc_buffer_full(&rx->buf)) {
155 dprintk("buffer overflow\n"); 165 dprintk("buffer overflow\n");
156 return -EOVERFLOW; 166 return -EOVERFLOW;
157 } 167 }
@@ -161,17 +171,25 @@ static int add_to_buf(struct IR *ir)
161 * data and we have space 171 * data and we have space
162 */ 172 */
163 do { 173 do {
174 if (kthread_should_stop())
175 return -ENODATA;
176
164 /* 177 /*
165 * Lock i2c bus for the duration. RX/TX chips interfere so 178 * Lock i2c bus for the duration. RX/TX chips interfere so
166 * this is worth it 179 * this is worth it
167 */ 180 */
168 mutex_lock(&ir->ir_lock); 181 mutex_lock(&ir->ir_lock);
169 182
183 if (kthread_should_stop()) {
184 mutex_unlock(&ir->ir_lock);
185 return -ENODATA;
186 }
187
170 /* 188 /*
171 * Send random "poll command" (?) Windows driver does this 189 * Send random "poll command" (?) Windows driver does this
172 * and it is a good point to detect chip failure. 190 * and it is a good point to detect chip failure.
173 */ 191 */
174 ret = i2c_master_send(&ir->c_rx, sendbuf, 1); 192 ret = i2c_master_send(rx->c, sendbuf, 1);
175 if (ret != 1) { 193 if (ret != 1) {
176 zilog_error("i2c_master_send failed with %d\n", ret); 194 zilog_error("i2c_master_send failed with %d\n", ret);
177 if (failures >= 3) { 195 if (failures >= 3) {
@@ -186,45 +204,53 @@ static int add_to_buf(struct IR *ir)
186 "trying reset\n"); 204 "trying reset\n");
187 205
188 set_current_state(TASK_UNINTERRUPTIBLE); 206 set_current_state(TASK_UNINTERRUPTIBLE);
207 if (kthread_should_stop()) {
208 mutex_unlock(&ir->ir_lock);
209 return -ENODATA;
210 }
189 schedule_timeout((100 * HZ + 999) / 1000); 211 schedule_timeout((100 * HZ + 999) / 1000);
190 ir->need_boot = 1; 212 ir->tx->need_boot = 1;
191 213
192 ++failures; 214 ++failures;
193 mutex_unlock(&ir->ir_lock); 215 mutex_unlock(&ir->ir_lock);
194 continue; 216 continue;
195 } 217 }
196 218
197 ret = i2c_master_recv(&ir->c_rx, keybuf, sizeof(keybuf)); 219 if (kthread_should_stop()) {
220 mutex_unlock(&ir->ir_lock);
221 return -ENODATA;
222 }
223 ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
198 mutex_unlock(&ir->ir_lock); 224 mutex_unlock(&ir->ir_lock);
199 if (ret != sizeof(keybuf)) { 225 if (ret != sizeof(keybuf)) {
200 zilog_error("i2c_master_recv failed with %d -- " 226 zilog_error("i2c_master_recv failed with %d -- "
201 "keeping last read buffer\n", ret); 227 "keeping last read buffer\n", ret);
202 } else { 228 } else {
203 ir->b[0] = keybuf[3]; 229 rx->b[0] = keybuf[3];
204 ir->b[1] = keybuf[4]; 230 rx->b[1] = keybuf[4];
205 ir->b[2] = keybuf[5]; 231 rx->b[2] = keybuf[5];
206 dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]); 232 dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
207 } 233 }
208 234
209 /* key pressed ? */ 235 /* key pressed ? */
210 if (ir->is_hdpvr) { 236 if (rx->hdpvr_data_fmt) {
211 if (got_data && (keybuf[0] == 0x80)) 237 if (got_data && (keybuf[0] == 0x80))
212 return 0; 238 return 0;
213 else if (got_data && (keybuf[0] == 0x00)) 239 else if (got_data && (keybuf[0] == 0x00))
214 return -ENODATA; 240 return -ENODATA;
215 } else if ((ir->b[0] & 0x80) == 0) 241 } else if ((rx->b[0] & 0x80) == 0)
216 return got_data ? 0 : -ENODATA; 242 return got_data ? 0 : -ENODATA;
217 243
218 /* look what we have */ 244 /* look what we have */
219 code = (((__u16)ir->b[0] & 0x7f) << 6) | (ir->b[1] >> 2); 245 code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);
220 246
221 codes[0] = (code >> 8) & 0xff; 247 codes[0] = (code >> 8) & 0xff;
222 codes[1] = code & 0xff; 248 codes[1] = code & 0xff;
223 249
224 /* return it */ 250 /* return it */
225 lirc_buffer_write(&ir->buf, codes); 251 lirc_buffer_write(&rx->buf, codes);
226 ++got_data; 252 ++got_data;
227 } while (!lirc_buffer_full(&ir->buf)); 253 } while (!lirc_buffer_full(&rx->buf));
228 254
229 return 0; 255 return 0;
230} 256}
@@ -242,46 +268,35 @@ static int add_to_buf(struct IR *ir)
242static int lirc_thread(void *arg) 268static int lirc_thread(void *arg)
243{ 269{
244 struct IR *ir = arg; 270 struct IR *ir = arg;
245 271 struct IR_rx *rx = ir->rx;
246 if (ir->t_notify != NULL)
247 complete(ir->t_notify);
248 272
249 dprintk("poll thread started\n"); 273 dprintk("poll thread started\n");
250 274
251 do { 275 while (!kthread_should_stop()) {
252 if (ir->open) { 276 set_current_state(TASK_INTERRUPTIBLE);
253 set_current_state(TASK_INTERRUPTIBLE);
254 277
255 /* 278 /* if device not opened, we can sleep half a second */
256 * This is ~113*2 + 24 + jitter (2*repeat gap + 279 if (!ir->open) {
257 * code length). We use this interval as the chip
258 * resets every time you poll it (bad!). This is
259 * therefore just sufficient to catch all of the
260 * button presses. It makes the remote much more
261 * responsive. You can see the difference by
262 * running irw and holding down a button. With
263 * 100ms, the old polling interval, you'll notice
264 * breaks in the repeat sequence corresponding to
265 * lost keypresses.
266 */
267 schedule_timeout((260 * HZ) / 1000);
268 if (ir->shutdown)
269 break;
270 if (!add_to_buf(ir))
271 wake_up_interruptible(&ir->buf.wait_poll);
272 } else {
273 /* if device not opened so we can sleep half a second */
274 set_current_state(TASK_INTERRUPTIBLE);
275 schedule_timeout(HZ/2); 280 schedule_timeout(HZ/2);
281 continue;
276 } 282 }
277 } while (!ir->shutdown);
278
279 if (ir->t_notify2 != NULL)
280 wait_for_completion(ir->t_notify2);
281 283
282 ir->task = NULL; 284 /*
283 if (ir->t_notify != NULL) 285 * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
284 complete(ir->t_notify); 286 * We use this interval as the chip resets every time you poll
287 * it (bad!). This is therefore just sufficient to catch all
288 * of the button presses. It makes the remote much more
289 * responsive. You can see the difference by running irw and
290 * holding down a button. With 100ms, the old polling
291 * interval, you'll notice breaks in the repeat sequence
292 * corresponding to lost keypresses.
293 */
294 schedule_timeout((260 * HZ) / 1000);
295 if (kthread_should_stop())
296 break;
297 if (!add_to_buf(ir))
298 wake_up_interruptible(&rx->buf.wait_poll);
299 }
285 300
286 dprintk("poll thread ended\n"); 301 dprintk("poll thread ended\n");
287 return 0; 302 return 0;
@@ -299,10 +314,10 @@ static int set_use_inc(void *data)
299 * this is completely broken code. lirc_unregister_driver() 314 * this is completely broken code. lirc_unregister_driver()
300 * must be possible even when the device is open 315 * must be possible even when the device is open
301 */ 316 */
302 if (ir->c_rx.addr) 317 if (ir->rx != NULL)
303 i2c_use_client(&ir->c_rx); 318 i2c_use_client(ir->rx->c);
304 if (ir->c_tx.addr) 319 if (ir->tx != NULL)
305 i2c_use_client(&ir->c_tx); 320 i2c_use_client(ir->tx->c);
306 321
307 return 0; 322 return 0;
308} 323}
@@ -311,10 +326,10 @@ static void set_use_dec(void *data)
311{ 326{
312 struct IR *ir = data; 327 struct IR *ir = data;
313 328
314 if (ir->c_rx.addr) 329 if (ir->rx)
315 i2c_release_client(&ir->c_rx); 330 i2c_release_client(ir->rx->c);
316 if (ir->c_tx.addr) 331 if (ir->tx)
317 i2c_release_client(&ir->c_tx); 332 i2c_release_client(ir->tx->c);
318 if (ir->l.owner != NULL) 333 if (ir->l.owner != NULL)
319 module_put(ir->l.owner); 334 module_put(ir->l.owner);
320} 335}
@@ -453,7 +468,7 @@ corrupt:
453} 468}
454 469
455/* send a block of data to the IR TX device */ 470/* send a block of data to the IR TX device */
456static int send_data_block(struct IR *ir, unsigned char *data_block) 471static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
457{ 472{
458 int i, j, ret; 473 int i, j, ret;
459 unsigned char buf[5]; 474 unsigned char buf[5];
@@ -467,7 +482,7 @@ static int send_data_block(struct IR *ir, unsigned char *data_block)
467 buf[1 + j] = data_block[i + j]; 482 buf[1 + j] = data_block[i + j];
468 dprintk("%02x %02x %02x %02x %02x", 483 dprintk("%02x %02x %02x %02x %02x",
469 buf[0], buf[1], buf[2], buf[3], buf[4]); 484 buf[0], buf[1], buf[2], buf[3], buf[4]);
470 ret = i2c_master_send(&ir->c_tx, buf, tosend + 1); 485 ret = i2c_master_send(tx->c, buf, tosend + 1);
471 if (ret != tosend + 1) { 486 if (ret != tosend + 1) {
472 zilog_error("i2c_master_send failed with %d\n", ret); 487 zilog_error("i2c_master_send failed with %d\n", ret);
473 return ret < 0 ? ret : -EFAULT; 488 return ret < 0 ? ret : -EFAULT;
@@ -478,38 +493,50 @@ static int send_data_block(struct IR *ir, unsigned char *data_block)
478} 493}
479 494
480/* send boot data to the IR TX device */ 495/* send boot data to the IR TX device */
481static int send_boot_data(struct IR *ir) 496static int send_boot_data(struct IR_tx *tx)
482{ 497{
483 int ret; 498 int ret, i;
484 unsigned char buf[4]; 499 unsigned char buf[4];
485 500
486 /* send the boot block */ 501 /* send the boot block */
487 ret = send_data_block(ir, tx_data->boot_data); 502 ret = send_data_block(tx, tx_data->boot_data);
488 if (ret != 0) 503 if (ret != 0)
489 return ret; 504 return ret;
490 505
491 /* kick it off? */ 506 /* Hit the go button to activate the new boot data */
492 buf[0] = 0x00; 507 buf[0] = 0x00;
493 buf[1] = 0x20; 508 buf[1] = 0x20;
494 ret = i2c_master_send(&ir->c_tx, buf, 2); 509 ret = i2c_master_send(tx->c, buf, 2);
495 if (ret != 2) { 510 if (ret != 2) {
496 zilog_error("i2c_master_send failed with %d\n", ret); 511 zilog_error("i2c_master_send failed with %d\n", ret);
497 return ret < 0 ? ret : -EFAULT; 512 return ret < 0 ? ret : -EFAULT;
498 } 513 }
499 ret = i2c_master_send(&ir->c_tx, buf, 1); 514
515 /*
516 * Wait for zilog to settle after hitting go post boot block upload.
517 * Without this delay, the HD-PVR and HVR-1950 both return an -EIO
518 * upon attempting to get firmware revision, and tx probe thus fails.
519 */
520 for (i = 0; i < 10; i++) {
521 ret = i2c_master_send(tx->c, buf, 1);
522 if (ret == 1)
523 break;
524 udelay(100);
525 }
526
500 if (ret != 1) { 527 if (ret != 1) {
501 zilog_error("i2c_master_send failed with %d\n", ret); 528 zilog_error("i2c_master_send failed with %d\n", ret);
502 return ret < 0 ? ret : -EFAULT; 529 return ret < 0 ? ret : -EFAULT;
503 } 530 }
504 531
505 /* Here comes the firmware version... (hopefully) */ 532 /* Here comes the firmware version... (hopefully) */
506 ret = i2c_master_recv(&ir->c_tx, buf, 4); 533 ret = i2c_master_recv(tx->c, buf, 4);
507 if (ret != 4) { 534 if (ret != 4) {
508 zilog_error("i2c_master_recv failed with %d\n", ret); 535 zilog_error("i2c_master_recv failed with %d\n", ret);
509 return 0; 536 return 0;
510 } 537 }
511 if (buf[0] != 0x80) { 538 if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
512 zilog_error("unexpected IR TX response: %02x\n", buf[0]); 539 zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
513 return 0; 540 return 0;
514 } 541 }
515 zilog_notify("Zilog/Hauppauge IR blaster firmware version " 542 zilog_notify("Zilog/Hauppauge IR blaster firmware version "
@@ -543,7 +570,7 @@ static void fw_unload(void)
543} 570}
544 571
545/* load "firmware" for the IR TX device */ 572/* load "firmware" for the IR TX device */
546static int fw_load(struct IR *ir) 573static int fw_load(struct IR_tx *tx)
547{ 574{
548 int ret; 575 int ret;
549 unsigned int i; 576 unsigned int i;
@@ -558,7 +585,7 @@ static int fw_load(struct IR *ir)
558 } 585 }
559 586
560 /* Request codeset data file */ 587 /* Request codeset data file */
561 ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &ir->c_tx.dev); 588 ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &tx->c->dev);
562 if (ret != 0) { 589 if (ret != 0) {
563 zilog_error("firmware haup-ir-blaster.bin not available " 590 zilog_error("firmware haup-ir-blaster.bin not available "
564 "(%d)\n", ret); 591 "(%d)\n", ret);
@@ -685,20 +712,20 @@ out:
685} 712}
686 713
687/* initialise the IR TX device */ 714/* initialise the IR TX device */
688static int tx_init(struct IR *ir) 715static int tx_init(struct IR_tx *tx)
689{ 716{
690 int ret; 717 int ret;
691 718
692 /* Load 'firmware' */ 719 /* Load 'firmware' */
693 ret = fw_load(ir); 720 ret = fw_load(tx);
694 if (ret != 0) 721 if (ret != 0)
695 return ret; 722 return ret;
696 723
697 /* Send boot block */ 724 /* Send boot block */
698 ret = send_boot_data(ir); 725 ret = send_boot_data(tx);
699 if (ret != 0) 726 if (ret != 0)
700 return ret; 727 return ret;
701 ir->need_boot = 0; 728 tx->need_boot = 0;
702 729
703 /* Looks good */ 730 /* Looks good */
704 return 0; 731 return 0;
@@ -714,20 +741,20 @@ static loff_t lseek(struct file *filep, loff_t offset, int orig)
714static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos) 741static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
715{ 742{
716 struct IR *ir = filep->private_data; 743 struct IR *ir = filep->private_data;
717 unsigned char buf[ir->buf.chunk_size]; 744 struct IR_rx *rx = ir->rx;
718 int ret = 0, written = 0; 745 int ret = 0, written = 0;
719 DECLARE_WAITQUEUE(wait, current); 746 DECLARE_WAITQUEUE(wait, current);
720 747
721 dprintk("read called\n"); 748 dprintk("read called\n");
722 if (ir->c_rx.addr == 0) 749 if (rx == NULL)
723 return -ENODEV; 750 return -ENODEV;
724 751
725 if (mutex_lock_interruptible(&ir->buf_lock)) 752 if (mutex_lock_interruptible(&rx->buf_lock))
726 return -ERESTARTSYS; 753 return -ERESTARTSYS;
727 754
728 if (n % ir->buf.chunk_size) { 755 if (n % rx->buf.chunk_size) {
729 dprintk("read result = -EINVAL\n"); 756 dprintk("read result = -EINVAL\n");
730 mutex_unlock(&ir->buf_lock); 757 mutex_unlock(&rx->buf_lock);
731 return -EINVAL; 758 return -EINVAL;
732 } 759 }
733 760
@@ -736,7 +763,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
736 * to avoid losing scan code (in case when queue is awaken somewhere 763 * to avoid losing scan code (in case when queue is awaken somewhere
737 * between while condition checking and scheduling) 764 * between while condition checking and scheduling)
738 */ 765 */
739 add_wait_queue(&ir->buf.wait_poll, &wait); 766 add_wait_queue(&rx->buf.wait_poll, &wait);
740 set_current_state(TASK_INTERRUPTIBLE); 767 set_current_state(TASK_INTERRUPTIBLE);
741 768
742 /* 769 /*
@@ -744,7 +771,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
744 * mode and 'copy_to_user' is happy, wait for data. 771 * mode and 'copy_to_user' is happy, wait for data.
745 */ 772 */
746 while (written < n && ret == 0) { 773 while (written < n && ret == 0) {
747 if (lirc_buffer_empty(&ir->buf)) { 774 if (lirc_buffer_empty(&rx->buf)) {
748 /* 775 /*
749 * According to the read(2) man page, 'written' can be 776 * According to the read(2) man page, 'written' can be
750 * returned as less than 'n', instead of blocking 777 * returned as less than 'n', instead of blocking
@@ -764,16 +791,17 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
764 schedule(); 791 schedule();
765 set_current_state(TASK_INTERRUPTIBLE); 792 set_current_state(TASK_INTERRUPTIBLE);
766 } else { 793 } else {
767 lirc_buffer_read(&ir->buf, buf); 794 unsigned char buf[rx->buf.chunk_size];
795 lirc_buffer_read(&rx->buf, buf);
768 ret = copy_to_user((void *)outbuf+written, buf, 796 ret = copy_to_user((void *)outbuf+written, buf,
769 ir->buf.chunk_size); 797 rx->buf.chunk_size);
770 written += ir->buf.chunk_size; 798 written += rx->buf.chunk_size;
771 } 799 }
772 } 800 }
773 801
774 remove_wait_queue(&ir->buf.wait_poll, &wait); 802 remove_wait_queue(&rx->buf.wait_poll, &wait);
775 set_current_state(TASK_RUNNING); 803 set_current_state(TASK_RUNNING);
776 mutex_unlock(&ir->buf_lock); 804 mutex_unlock(&rx->buf_lock);
777 805
778 dprintk("read result = %s (%d)\n", 806 dprintk("read result = %s (%d)\n",
779 ret ? "-EFAULT" : "OK", ret); 807 ret ? "-EFAULT" : "OK", ret);
@@ -782,7 +810,7 @@ static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
782} 810}
783 811
784/* send a keypress to the IR TX device */ 812/* send a keypress to the IR TX device */
785static int send_code(struct IR *ir, unsigned int code, unsigned int key) 813static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
786{ 814{
787 unsigned char data_block[TX_BLOCK_SIZE]; 815 unsigned char data_block[TX_BLOCK_SIZE];
788 unsigned char buf[2]; 816 unsigned char buf[2];
@@ -799,26 +827,34 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
799 return ret; 827 return ret;
800 828
801 /* Send the data block */ 829 /* Send the data block */
802 ret = send_data_block(ir, data_block); 830 ret = send_data_block(tx, data_block);
803 if (ret != 0) 831 if (ret != 0)
804 return ret; 832 return ret;
805 833
806 /* Send data block length? */ 834 /* Send data block length? */
807 buf[0] = 0x00; 835 buf[0] = 0x00;
808 buf[1] = 0x40; 836 buf[1] = 0x40;
809 ret = i2c_master_send(&ir->c_tx, buf, 2); 837 ret = i2c_master_send(tx->c, buf, 2);
810 if (ret != 2) { 838 if (ret != 2) {
811 zilog_error("i2c_master_send failed with %d\n", ret); 839 zilog_error("i2c_master_send failed with %d\n", ret);
812 return ret < 0 ? ret : -EFAULT; 840 return ret < 0 ? ret : -EFAULT;
813 } 841 }
814 ret = i2c_master_send(&ir->c_tx, buf, 1); 842
843 /* Give the z8 a moment to process data block */
844 for (i = 0; i < 10; i++) {
845 ret = i2c_master_send(tx->c, buf, 1);
846 if (ret == 1)
847 break;
848 udelay(100);
849 }
850
815 if (ret != 1) { 851 if (ret != 1) {
816 zilog_error("i2c_master_send failed with %d\n", ret); 852 zilog_error("i2c_master_send failed with %d\n", ret);
817 return ret < 0 ? ret : -EFAULT; 853 return ret < 0 ? ret : -EFAULT;
818 } 854 }
819 855
820 /* Send finished download? */ 856 /* Send finished download? */
821 ret = i2c_master_recv(&ir->c_tx, buf, 1); 857 ret = i2c_master_recv(tx->c, buf, 1);
822 if (ret != 1) { 858 if (ret != 1) {
823 zilog_error("i2c_master_recv failed with %d\n", ret); 859 zilog_error("i2c_master_recv failed with %d\n", ret);
824 return ret < 0 ? ret : -EFAULT; 860 return ret < 0 ? ret : -EFAULT;
@@ -832,7 +868,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
832 /* Send prepare command? */ 868 /* Send prepare command? */
833 buf[0] = 0x00; 869 buf[0] = 0x00;
834 buf[1] = 0x80; 870 buf[1] = 0x80;
835 ret = i2c_master_send(&ir->c_tx, buf, 2); 871 ret = i2c_master_send(tx->c, buf, 2);
836 if (ret != 2) { 872 if (ret != 2) {
837 zilog_error("i2c_master_send failed with %d\n", ret); 873 zilog_error("i2c_master_send failed with %d\n", ret);
838 return ret < 0 ? ret : -EFAULT; 874 return ret < 0 ? ret : -EFAULT;
@@ -843,7 +879,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
843 * last i2c_master_recv always fails with a -5, so for now, we're 879 * last i2c_master_recv always fails with a -5, so for now, we're
844 * going to skip this whole mess and say we're done on the HD PVR 880 * going to skip this whole mess and say we're done on the HD PVR
845 */ 881 */
846 if (ir->is_hdpvr) { 882 if (!tx->post_tx_ready_poll) {
847 dprintk("sent code %u, key %u\n", code, key); 883 dprintk("sent code %u, key %u\n", code, key);
848 return 0; 884 return 0;
849 } 885 }
@@ -857,7 +893,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
857 for (i = 0; i < 20; ++i) { 893 for (i = 0; i < 20; ++i) {
858 set_current_state(TASK_UNINTERRUPTIBLE); 894 set_current_state(TASK_UNINTERRUPTIBLE);
859 schedule_timeout((50 * HZ + 999) / 1000); 895 schedule_timeout((50 * HZ + 999) / 1000);
860 ret = i2c_master_send(&ir->c_tx, buf, 1); 896 ret = i2c_master_send(tx->c, buf, 1);
861 if (ret == 1) 897 if (ret == 1)
862 break; 898 break;
863 dprintk("NAK expected: i2c_master_send " 899 dprintk("NAK expected: i2c_master_send "
@@ -870,7 +906,7 @@ static int send_code(struct IR *ir, unsigned int code, unsigned int key)
870 } 906 }
871 907
872 /* Seems to be an 'ok' response */ 908 /* Seems to be an 'ok' response */
873 i = i2c_master_recv(&ir->c_tx, buf, 1); 909 i = i2c_master_recv(tx->c, buf, 1);
874 if (i != 1) { 910 if (i != 1) {
875 zilog_error("i2c_master_recv failed with %d\n", ret); 911 zilog_error("i2c_master_recv failed with %d\n", ret);
876 return -EFAULT; 912 return -EFAULT;
@@ -895,10 +931,11 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
895 loff_t *ppos) 931 loff_t *ppos)
896{ 932{
897 struct IR *ir = filep->private_data; 933 struct IR *ir = filep->private_data;
934 struct IR_tx *tx = ir->tx;
898 size_t i; 935 size_t i;
899 int failures = 0; 936 int failures = 0;
900 937
901 if (ir->c_tx.addr == 0) 938 if (tx == NULL)
902 return -ENODEV; 939 return -ENODEV;
903 940
904 /* Validate user parameters */ 941 /* Validate user parameters */
@@ -919,15 +956,15 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
919 } 956 }
920 957
921 /* Send boot data first if required */ 958 /* Send boot data first if required */
922 if (ir->need_boot == 1) { 959 if (tx->need_boot == 1) {
923 ret = send_boot_data(ir); 960 ret = send_boot_data(tx);
924 if (ret == 0) 961 if (ret == 0)
925 ir->need_boot = 0; 962 tx->need_boot = 0;
926 } 963 }
927 964
928 /* Send the code */ 965 /* Send the code */
929 if (ret == 0) { 966 if (ret == 0) {
930 ret = send_code(ir, (unsigned)command >> 16, 967 ret = send_code(tx, (unsigned)command >> 16,
931 (unsigned)command & 0xFFFF); 968 (unsigned)command & 0xFFFF);
932 if (ret == -EPROTO) { 969 if (ret == -EPROTO) {
933 mutex_unlock(&ir->ir_lock); 970 mutex_unlock(&ir->ir_lock);
@@ -952,7 +989,7 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
952 } 989 }
953 set_current_state(TASK_UNINTERRUPTIBLE); 990 set_current_state(TASK_UNINTERRUPTIBLE);
954 schedule_timeout((100 * HZ + 999) / 1000); 991 schedule_timeout((100 * HZ + 999) / 1000);
955 ir->need_boot = 1; 992 tx->need_boot = 1;
956 ++failures; 993 ++failures;
957 } else 994 } else
958 i += sizeof(int); 995 i += sizeof(int);
@@ -969,22 +1006,23 @@ static ssize_t write(struct file *filep, const char *buf, size_t n,
969static unsigned int poll(struct file *filep, poll_table *wait) 1006static unsigned int poll(struct file *filep, poll_table *wait)
970{ 1007{
971 struct IR *ir = filep->private_data; 1008 struct IR *ir = filep->private_data;
1009 struct IR_rx *rx = ir->rx;
972 unsigned int ret; 1010 unsigned int ret;
973 1011
974 dprintk("poll called\n"); 1012 dprintk("poll called\n");
975 if (ir->c_rx.addr == 0) 1013 if (rx == NULL)
976 return -ENODEV; 1014 return -ENODEV;
977 1015
978 mutex_lock(&ir->buf_lock); 1016 mutex_lock(&rx->buf_lock);
979 1017
980 poll_wait(filep, &ir->buf.wait_poll, wait); 1018 poll_wait(filep, &rx->buf.wait_poll, wait);
981 1019
982 dprintk("poll result = %s\n", 1020 dprintk("poll result = %s\n",
983 lirc_buffer_empty(&ir->buf) ? "0" : "POLLIN|POLLRDNORM"); 1021 lirc_buffer_empty(&rx->buf) ? "0" : "POLLIN|POLLRDNORM");
984 1022
985 ret = lirc_buffer_empty(&ir->buf) ? 0 : (POLLIN|POLLRDNORM); 1023 ret = lirc_buffer_empty(&rx->buf) ? 0 : (POLLIN|POLLRDNORM);
986 1024
987 mutex_unlock(&ir->buf_lock); 1025 mutex_unlock(&rx->buf_lock);
988 return ret; 1026 return ret;
989} 1027}
990 1028
@@ -994,10 +1032,9 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
994 int result; 1032 int result;
995 unsigned long mode, features = 0; 1033 unsigned long mode, features = 0;
996 1034
997 if (ir->c_rx.addr != 0) 1035 features |= LIRC_CAN_SEND_PULSE;
1036 if (ir->rx != NULL)
998 features |= LIRC_CAN_REC_LIRCCODE; 1037 features |= LIRC_CAN_REC_LIRCCODE;
999 if (ir->c_tx.addr != 0)
1000 features |= LIRC_CAN_SEND_PULSE;
1001 1038
1002 switch (cmd) { 1039 switch (cmd) {
1003 case LIRC_GET_LENGTH: 1040 case LIRC_GET_LENGTH:
@@ -1024,15 +1061,9 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
1024 result = -EINVAL; 1061 result = -EINVAL;
1025 break; 1062 break;
1026 case LIRC_GET_SEND_MODE: 1063 case LIRC_GET_SEND_MODE:
1027 if (!(features&LIRC_CAN_SEND_MASK))
1028 return -ENOSYS;
1029
1030 result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg); 1064 result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
1031 break; 1065 break;
1032 case LIRC_SET_SEND_MODE: 1066 case LIRC_SET_SEND_MODE:
1033 if (!(features&LIRC_CAN_SEND_MASK))
1034 return -ENOSYS;
1035
1036 result = get_user(mode, (unsigned long *) arg); 1067 result = get_user(mode, (unsigned long *) arg);
1037 if (!result && mode != LIRC_MODE_PULSE) 1068 if (!result && mode != LIRC_MODE_PULSE)
1038 return -EINVAL; 1069 return -EINVAL;
@@ -1043,6 +1074,15 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
1043 return result; 1074 return result;
1044} 1075}
1045 1076
1077/* ir_devices_lock must be held */
1078static struct IR *find_ir_device_by_minor(unsigned int minor)
1079{
1080 if (minor >= MAX_IRCTL_DEVICES)
1081 return NULL;
1082
1083 return ir_devices[minor];
1084}
1085
1046/* 1086/*
1047 * Open the IR device. Get hold of our IR structure and 1087 * Open the IR device. Get hold of our IR structure and
1048 * stash it in private_data for the file 1088 * stash it in private_data for the file
@@ -1051,15 +1091,15 @@ static int open(struct inode *node, struct file *filep)
1051{ 1091{
1052 struct IR *ir; 1092 struct IR *ir;
1053 int ret; 1093 int ret;
1094 unsigned int minor = MINOR(node->i_rdev);
1054 1095
1055 /* find our IR struct */ 1096 /* find our IR struct */
1056 unsigned minor = MINOR(node->i_rdev); 1097 mutex_lock(&ir_devices_lock);
1057 if (minor >= MAX_IRCTL_DEVICES) { 1098 ir = find_ir_device_by_minor(minor);
1058 dprintk("minor %d: open result = -ENODEV\n", 1099 mutex_unlock(&ir_devices_lock);
1059 minor); 1100
1101 if (ir == NULL)
1060 return -ENODEV; 1102 return -ENODEV;
1061 }
1062 ir = ir_devices[minor];
1063 1103
1064 /* increment in use count */ 1104 /* increment in use count */
1065 mutex_lock(&ir->ir_lock); 1105 mutex_lock(&ir->ir_lock);
@@ -1106,7 +1146,6 @@ static struct lirc_driver lirc_template = {
1106 1146
1107static int ir_remove(struct i2c_client *client); 1147static int ir_remove(struct i2c_client *client);
1108static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id); 1148static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
1109static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg);
1110 1149
1111#define ID_FLAG_TX 0x01 1150#define ID_FLAG_TX 0x01
1112#define ID_FLAG_HDPVR 0x02 1151#define ID_FLAG_HDPVR 0x02
@@ -1126,7 +1165,6 @@ static struct i2c_driver driver = {
1126 }, 1165 },
1127 .probe = ir_probe, 1166 .probe = ir_probe,
1128 .remove = ir_remove, 1167 .remove = ir_remove,
1129 .command = ir_command,
1130 .id_table = ir_transceiver_id, 1168 .id_table = ir_transceiver_id,
1131}; 1169};
1132 1170
@@ -1144,214 +1182,253 @@ static const struct file_operations lirc_fops = {
1144 .release = close 1182 .release = close
1145}; 1183};
1146 1184
1147static int ir_remove(struct i2c_client *client) 1185static void destroy_rx_kthread(struct IR_rx *rx)
1148{ 1186{
1149 struct IR *ir = i2c_get_clientdata(client); 1187 /* end up polling thread */
1188 if (rx != NULL && !IS_ERR_OR_NULL(rx->task)) {
1189 kthread_stop(rx->task);
1190 rx->task = NULL;
1191 }
1192}
1150 1193
1151 mutex_lock(&ir->ir_lock); 1194/* ir_devices_lock must be held */
1195static int add_ir_device(struct IR *ir)
1196{
1197 int i;
1152 1198
1153 if (ir->have_rx || ir->have_tx) { 1199 for (i = 0; i < MAX_IRCTL_DEVICES; i++)
1154 DECLARE_COMPLETION(tn); 1200 if (ir_devices[i] == NULL) {
1155 DECLARE_COMPLETION(tn2); 1201 ir_devices[i] = ir;
1156 1202 break;
1157 /* end up polling thread */
1158 if (ir->task && !IS_ERR(ir->task)) {
1159 ir->t_notify = &tn;
1160 ir->t_notify2 = &tn2;
1161 ir->shutdown = 1;
1162 wake_up_process(ir->task);
1163 complete(&tn2);
1164 wait_for_completion(&tn);
1165 ir->t_notify = NULL;
1166 ir->t_notify2 = NULL;
1167 } 1203 }
1168 1204
1169 } else { 1205 return i == MAX_IRCTL_DEVICES ? -ENOMEM : i;
1170 mutex_unlock(&ir->ir_lock); 1206}
1171 zilog_error("%s: detached from something we didn't " 1207
1172 "attach to\n", __func__); 1208/* ir_devices_lock must be held */
1173 return -ENODEV; 1209static void del_ir_device(struct IR *ir)
1210{
1211 int i;
1212
1213 for (i = 0; i < MAX_IRCTL_DEVICES; i++)
1214 if (ir_devices[i] == ir) {
1215 ir_devices[i] = NULL;
1216 break;
1217 }
1218}
1219
1220static int ir_remove(struct i2c_client *client)
1221{
1222 struct IR *ir = i2c_get_clientdata(client);
1223
1224 mutex_lock(&ir_devices_lock);
1225
1226 if (ir == NULL) {
1227 /* We destroyed everything when the first client came through */
1228 mutex_unlock(&ir_devices_lock);
1229 return 0;
1174 } 1230 }
1175 1231
1176 /* unregister lirc driver */ 1232 /* Good-bye LIRC */
1177 if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) { 1233 lirc_unregister_driver(ir->l.minor);
1178 lirc_unregister_driver(ir->l.minor); 1234
1179 ir_devices[ir->l.minor] = NULL; 1235 /* Good-bye Rx */
1236 destroy_rx_kthread(ir->rx);
1237 if (ir->rx != NULL) {
1238 if (ir->rx->buf.fifo_initialized)
1239 lirc_buffer_free(&ir->rx->buf);
1240 i2c_set_clientdata(ir->rx->c, NULL);
1241 kfree(ir->rx);
1180 } 1242 }
1181 1243
1182 /* free memory */ 1244 /* Good-bye Tx */
1183 lirc_buffer_free(&ir->buf); 1245 i2c_set_clientdata(ir->tx->c, NULL);
1184 mutex_unlock(&ir->ir_lock); 1246 kfree(ir->tx);
1247
1248 /* Good-bye IR */
1249 del_ir_device(ir);
1185 kfree(ir); 1250 kfree(ir);
1186 1251
1252 mutex_unlock(&ir_devices_lock);
1187 return 0; 1253 return 0;
1188} 1254}
1189 1255
1190static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) 1256
1257/* ir_devices_lock must be held */
1258static struct IR *find_ir_device_by_adapter(struct i2c_adapter *adapter)
1191{ 1259{
1260 int i;
1192 struct IR *ir = NULL; 1261 struct IR *ir = NULL;
1262
1263 for (i = 0; i < MAX_IRCTL_DEVICES; i++)
1264 if (ir_devices[i] != NULL &&
1265 ir_devices[i]->adapter == adapter) {
1266 ir = ir_devices[i];
1267 break;
1268 }
1269
1270 return ir;
1271}
1272
1273static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
1274{
1275 struct IR *ir;
1193 struct i2c_adapter *adap = client->adapter; 1276 struct i2c_adapter *adap = client->adapter;
1194 char buf;
1195 int ret; 1277 int ret;
1196 int have_rx = 0, have_tx = 0; 1278 bool tx_probe = false;
1197 1279
1198 dprintk("%s: adapter name (%s) nr %d, i2c_device_id name (%s), " 1280 dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
1199 "client addr=0x%02x\n", 1281 __func__, id->name, adap->nr, adap->name, client->addr);
1200 __func__, adap->name, adap->nr, id->name, client->addr);
1201 1282
1202 /* 1283 /*
1203 * FIXME - This probe function probes both the Tx and Rx 1284 * The IR receiver is at i2c address 0x71.
1204 * addresses of the IR microcontroller. 1285 * The IR transmitter is at i2c address 0x70.
1205 *
1206 * However, the I2C subsystem is passing along one I2C client at a
1207 * time, based on matches to the ir_transceiver_id[] table above.
1208 * The expectation is that each i2c_client address will be probed
1209 * individually by drivers so the I2C subsystem can mark all client
1210 * addresses as claimed or not.
1211 *
1212 * This probe routine causes only one of the client addresses, TX or RX,
1213 * to be claimed. This will cause a problem if the I2C subsystem is
1214 * subsequently triggered to probe unclaimed clients again.
1215 */ 1286 */
1216 /*
1217 * The external IR receiver is at i2c address 0x71.
1218 * The IR transmitter is at 0x70.
1219 */
1220 client->addr = 0x70;
1221 1287
1222 if (!disable_tx) { 1288 if (id->driver_data & ID_FLAG_TX)
1223 if (i2c_master_recv(client, &buf, 1) == 1) 1289 tx_probe = true;
1224 have_tx = 1; 1290 else if (tx_only) /* module option */
1225 dprintk("probe 0x70 @ %s: %s\n", 1291 return -ENXIO;
1226 adap->name, have_tx ? "success" : "failed");
1227 }
1228 1292
1229 if (!disable_rx) { 1293 zilog_info("probing IR %s on %s (i2c-%d)\n",
1230 client->addr = 0x71; 1294 tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
1231 if (i2c_master_recv(client, &buf, 1) == 1)
1232 have_rx = 1;
1233 dprintk("probe 0x71 @ %s: %s\n",
1234 adap->name, have_rx ? "success" : "failed");
1235 }
1236 1295
1237 if (!(have_rx || have_tx)) { 1296 mutex_lock(&ir_devices_lock);
1238 zilog_error("%s: no devices found\n", adap->name);
1239 goto out_nodev;
1240 }
1241 1297
1242 printk(KERN_INFO "lirc_zilog: chip found with %s\n", 1298 /* Use a single struct IR instance for both the Rx and Tx functions */
1243 have_rx && have_tx ? "RX and TX" : 1299 ir = find_ir_device_by_adapter(adap);
1244 have_rx ? "RX only" : "TX only"); 1300 if (ir == NULL) {
1301 ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
1302 if (ir == NULL) {
1303 ret = -ENOMEM;
1304 goto out_no_ir;
1305 }
1306 /* store for use in ir_probe() again, and open() later on */
1307 ret = add_ir_device(ir);
1308 if (ret)
1309 goto out_free_ir;
1310
1311 ir->adapter = adap;
1312 mutex_init(&ir->ir_lock);
1313
1314 /* set lirc_dev stuff */
1315 memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
1316 ir->l.minor = minor; /* module option */
1317 ir->l.code_length = 13;
1318 ir->l.rbuf = NULL;
1319 ir->l.fops = &lirc_fops;
1320 ir->l.data = ir;
1321 ir->l.dev = &adap->dev;
1322 ir->l.sample_rate = 0;
1323 }
1245 1324
1246 ir = kzalloc(sizeof(struct IR), GFP_KERNEL); 1325 if (tx_probe) {
1326 /* Set up a struct IR_tx instance */
1327 ir->tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
1328 if (ir->tx == NULL) {
1329 ret = -ENOMEM;
1330 goto out_free_xx;
1331 }
1247 1332
1248 if (!ir) 1333 ir->tx->c = client;
1249 goto out_nomem; 1334 ir->tx->need_boot = 1;
1335 ir->tx->post_tx_ready_poll =
1336 (id->driver_data & ID_FLAG_HDPVR) ? false : true;
1337 } else {
1338 /* Set up a struct IR_rx instance */
1339 ir->rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
1340 if (ir->rx == NULL) {
1341 ret = -ENOMEM;
1342 goto out_free_xx;
1343 }
1250 1344
1251 ret = lirc_buffer_init(&ir->buf, 2, BUFLEN / 2); 1345 ret = lirc_buffer_init(&ir->rx->buf, 2, BUFLEN / 2);
1252 if (ret) 1346 if (ret)
1253 goto out_nomem; 1347 goto out_free_xx;
1254 1348
1255 mutex_init(&ir->ir_lock); 1349 mutex_init(&ir->rx->buf_lock);
1256 mutex_init(&ir->buf_lock); 1350 ir->rx->c = client;
1257 ir->need_boot = 1; 1351 ir->rx->hdpvr_data_fmt =
1258 ir->is_hdpvr = (id->driver_data & ID_FLAG_HDPVR) ? true : false; 1352 (id->driver_data & ID_FLAG_HDPVR) ? true : false;
1259 1353
1260 memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver)); 1354 /* set lirc_dev stuff */
1261 ir->l.minor = -1; 1355 ir->l.rbuf = &ir->rx->buf;
1356 }
1262 1357
1263 /* I2C attach to device */
1264 i2c_set_clientdata(client, ir); 1358 i2c_set_clientdata(client, ir);
1265 1359
1266 /* initialise RX device */ 1360 /* Proceed only if we have the required Tx and Rx clients ready to go */
1267 if (have_rx) { 1361 if (ir->tx == NULL ||
1268 DECLARE_COMPLETION(tn); 1362 (ir->rx == NULL && !tx_only)) {
1269 memcpy(&ir->c_rx, client, sizeof(struct i2c_client)); 1363 zilog_info("probe of IR %s on %s (i2c-%d) done. Waiting on "
1270 1364 "IR %s.\n", tx_probe ? "Tx" : "Rx", adap->name,
1271 ir->c_rx.addr = 0x71; 1365 adap->nr, tx_probe ? "Rx" : "Tx");
1272 strlcpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME, 1366 goto out_ok;
1273 I2C_NAME_SIZE); 1367 }
1274 1368
1369 /* initialise RX device */
1370 if (ir->rx != NULL) {
1275 /* try to fire up polling thread */ 1371 /* try to fire up polling thread */
1276 ir->t_notify = &tn; 1372 ir->rx->task = kthread_run(lirc_thread, ir,
1277 ir->task = kthread_run(lirc_thread, ir, "lirc_zilog"); 1373 "zilog-rx-i2c-%d", adap->nr);
1278 if (IS_ERR(ir->task)) { 1374 if (IS_ERR(ir->rx->task)) {
1279 ret = PTR_ERR(ir->task); 1375 ret = PTR_ERR(ir->rx->task);
1280 zilog_error("lirc_register_driver: cannot run " 1376 zilog_error("%s: could not start IR Rx polling thread"
1281 "poll thread %d\n", ret); 1377 "\n", __func__);
1282 goto err; 1378 goto out_free_xx;
1283 } 1379 }
1284 wait_for_completion(&tn);
1285 ir->t_notify = NULL;
1286 ir->have_rx = 1;
1287 } 1380 }
1288 1381
1289 /* initialise TX device */
1290 if (have_tx) {
1291 memcpy(&ir->c_tx, client, sizeof(struct i2c_client));
1292 ir->c_tx.addr = 0x70;
1293 strlcpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME,
1294 I2C_NAME_SIZE);
1295 ir->have_tx = 1;
1296 }
1297
1298 /* set lirc_dev stuff */
1299 ir->l.code_length = 13;
1300 ir->l.rbuf = &ir->buf;
1301 ir->l.fops = &lirc_fops;
1302 ir->l.data = ir;
1303 ir->l.minor = minor;
1304 ir->l.dev = &adap->dev;
1305 ir->l.sample_rate = 0;
1306
1307 /* register with lirc */ 1382 /* register with lirc */
1308 ir->l.minor = lirc_register_driver(&ir->l); 1383 ir->l.minor = lirc_register_driver(&ir->l);
1309 if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) { 1384 if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
1310 zilog_error("ir_attach: \"minor\" must be between 0 and %d " 1385 zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
1311 "(%d)!\n", MAX_IRCTL_DEVICES-1, ir->l.minor); 1386 __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
1312 ret = -EBADRQC; 1387 ret = -EBADRQC;
1313 goto err; 1388 goto out_free_thread;
1314 } 1389 }
1315 1390
1316 /* store this for getting back in open() later on */
1317 ir_devices[ir->l.minor] = ir;
1318
1319 /* 1391 /*
1320 * if we have the tx device, load the 'firmware'. We do this 1392 * if we have the tx device, load the 'firmware'. We do this
1321 * after registering with lirc as otherwise hotplug seems to take 1393 * after registering with lirc as otherwise hotplug seems to take
1322 * 10s to create the lirc device. 1394 * 10s to create the lirc device.
1323 */ 1395 */
1324 if (have_tx) { 1396 ret = tx_init(ir->tx);
1325 /* Special TX init */ 1397 if (ret != 0)
1326 ret = tx_init(ir); 1398 goto out_unregister;
1327 if (ret != 0)
1328 goto err;
1329 }
1330 1399
1400 zilog_info("probe of IR %s on %s (i2c-%d) done. IR unit ready.\n",
1401 tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
1402out_ok:
1403 mutex_unlock(&ir_devices_lock);
1331 return 0; 1404 return 0;
1332 1405
1333err: 1406out_unregister:
1334 /* undo everything, hopefully... */ 1407 lirc_unregister_driver(ir->l.minor);
1335 if (ir->c_rx.addr) 1408out_free_thread:
1336 ir_remove(&ir->c_rx); 1409 destroy_rx_kthread(ir->rx);
1337 if (ir->c_tx.addr) 1410out_free_xx:
1338 ir_remove(&ir->c_tx); 1411 if (ir->rx != NULL) {
1339 return ret; 1412 if (ir->rx->buf.fifo_initialized)
1340 1413 lirc_buffer_free(&ir->rx->buf);
1341out_nodev: 1414 if (ir->rx->c != NULL)
1342 zilog_error("no device found\n"); 1415 i2c_set_clientdata(ir->rx->c, NULL);
1343 return -ENODEV; 1416 kfree(ir->rx);
1344 1417 }
1345out_nomem: 1418 if (ir->tx != NULL) {
1346 zilog_error("memory allocation failure\n"); 1419 if (ir->tx->c != NULL)
1420 i2c_set_clientdata(ir->tx->c, NULL);
1421 kfree(ir->tx);
1422 }
1423out_free_ir:
1424 del_ir_device(ir);
1347 kfree(ir); 1425 kfree(ir);
1348 return -ENOMEM; 1426out_no_ir:
1349} 1427 zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
1350 1428 __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
1351static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg) 1429 ret);
1352{ 1430 mutex_unlock(&ir_devices_lock);
1353 /* nothing */ 1431 return ret;
1354 return 0;
1355} 1432}
1356 1433
1357static int __init zilog_init(void) 1434static int __init zilog_init(void)
@@ -1361,6 +1438,7 @@ static int __init zilog_init(void)
1361 zilog_notify("Zilog/Hauppauge IR driver initializing\n"); 1438 zilog_notify("Zilog/Hauppauge IR driver initializing\n");
1362 1439
1363 mutex_init(&tx_data_lock); 1440 mutex_init(&tx_data_lock);
1441 mutex_init(&ir_devices_lock);
1364 1442
1365 request_module("firmware_class"); 1443 request_module("firmware_class");
1366 1444
@@ -1386,7 +1464,8 @@ module_exit(zilog_exit);
1386 1464
1387MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)"); 1465MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
1388MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, " 1466MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
1389 "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver"); 1467 "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
1468 "Andy Walls");
1390MODULE_LICENSE("GPL"); 1469MODULE_LICENSE("GPL");
1391/* for compat with old name, which isn't all that accurate anymore */ 1470/* for compat with old name, which isn't all that accurate anymore */
1392MODULE_ALIAS("lirc_pvr150"); 1471MODULE_ALIAS("lirc_pvr150");
@@ -1397,8 +1476,5 @@ MODULE_PARM_DESC(minor, "Preferred minor device number");
1397module_param(debug, bool, 0644); 1476module_param(debug, bool, 0644);
1398MODULE_PARM_DESC(debug, "Enable debugging messages"); 1477MODULE_PARM_DESC(debug, "Enable debugging messages");
1399 1478
1400module_param(disable_rx, bool, 0644); 1479module_param(tx_only, bool, 0644);
1401MODULE_PARM_DESC(disable_rx, "Disable the IR receiver device"); 1480MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
1402
1403module_param(disable_tx, bool, 0644);
1404MODULE_PARM_DESC(disable_tx, "Disable the IR transmitter device");
diff --git a/drivers/staging/msm/msm_fb.c b/drivers/staging/msm/msm_fb.c
index 23fa049b51f2..a2f29d464051 100644
--- a/drivers/staging/msm/msm_fb.c
+++ b/drivers/staging/msm/msm_fb.c
@@ -347,7 +347,7 @@ static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
347 if ((!mfd) || (mfd->key != MFD_KEY)) 347 if ((!mfd) || (mfd->key != MFD_KEY))
348 return 0; 348 return 0;
349 349
350 acquire_console_sem(); 350 console_lock();
351 fb_set_suspend(mfd->fbi, 1); 351 fb_set_suspend(mfd->fbi, 1);
352 352
353 ret = msm_fb_suspend_sub(mfd); 353 ret = msm_fb_suspend_sub(mfd);
@@ -358,7 +358,7 @@ static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
358 pdev->dev.power.power_state = state; 358 pdev->dev.power.power_state = state;
359 } 359 }
360 360
361 release_console_sem(); 361 console_unlock();
362 return ret; 362 return ret;
363} 363}
364#else 364#else
@@ -431,11 +431,11 @@ static int msm_fb_resume(struct platform_device *pdev)
431 if ((!mfd) || (mfd->key != MFD_KEY)) 431 if ((!mfd) || (mfd->key != MFD_KEY))
432 return 0; 432 return 0;
433 433
434 acquire_console_sem(); 434 console_lock();
435 ret = msm_fb_resume_sub(mfd); 435 ret = msm_fb_resume_sub(mfd);
436 pdev->dev.power.power_state = PMSG_ON; 436 pdev->dev.power.power_state = PMSG_ON;
437 fb_set_suspend(mfd->fbi, 1); 437 fb_set_suspend(mfd->fbi, 1);
438 release_console_sem(); 438 console_unlock();
439 439
440 return ret; 440 return ret;
441} 441}
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 9f26dc9408bb..56a283d1a74d 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -373,17 +373,17 @@ static void dcon_source_switch(struct work_struct *work)
373 * 373 *
374 * For now, we just hope.. 374 * For now, we just hope..
375 */ 375 */
376 acquire_console_sem(); 376 console_lock();
377 ignore_fb_events = 1; 377 ignore_fb_events = 1;
378 if (fb_blank(fbinfo, FB_BLANK_UNBLANK)) { 378 if (fb_blank(fbinfo, FB_BLANK_UNBLANK)) {
379 ignore_fb_events = 0; 379 ignore_fb_events = 0;
380 release_console_sem(); 380 console_unlock();
381 printk(KERN_ERR "olpc-dcon: Failed to enter CPU mode\n"); 381 printk(KERN_ERR "olpc-dcon: Failed to enter CPU mode\n");
382 dcon_pending = DCON_SOURCE_DCON; 382 dcon_pending = DCON_SOURCE_DCON;
383 return; 383 return;
384 } 384 }
385 ignore_fb_events = 0; 385 ignore_fb_events = 0;
386 release_console_sem(); 386 console_unlock();
387 387
388 /* And turn off the DCON */ 388 /* And turn off the DCON */
389 pdata->set_dconload(1); 389 pdata->set_dconload(1);
@@ -435,12 +435,12 @@ static void dcon_source_switch(struct work_struct *work)
435 } 435 }
436 } 436 }
437 437
438 acquire_console_sem(); 438 console_lock();
439 ignore_fb_events = 1; 439 ignore_fb_events = 1;
440 if (fb_blank(fbinfo, FB_BLANK_POWERDOWN)) 440 if (fb_blank(fbinfo, FB_BLANK_POWERDOWN))
441 printk(KERN_ERR "olpc-dcon: couldn't blank fb!\n"); 441 printk(KERN_ERR "olpc-dcon: couldn't blank fb!\n");
442 ignore_fb_events = 0; 442 ignore_fb_events = 0;
443 release_console_sem(); 443 console_unlock();
444 444
445 printk(KERN_INFO "olpc-dcon: The DCON has control\n"); 445 printk(KERN_INFO "olpc-dcon: The DCON has control\n");
446 break; 446 break;
diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c
index 701561d6b6fd..236dd36d349a 100644
--- a/drivers/staging/rt2860/rt_main_dev.c
+++ b/drivers/staging/rt2860/rt_main_dev.c
@@ -484,8 +484,6 @@ struct net_device *RtmpPhyNetDevInit(struct rt_rtmp_adapter *pAd,
484 net_dev->ml_priv = (void *)pAd; 484 net_dev->ml_priv = (void *)pAd;
485 pAd->net_dev = net_dev; 485 pAd->net_dev = net_dev;
486 486
487 netif_stop_queue(net_dev);
488
489 return net_dev; 487 return net_dev;
490 488
491} 489}
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ee68d51caa4e..322bf49ee906 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -106,6 +106,7 @@ struct usb_device_id rtusb_usb_id[] = {
106 {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */ 106 {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */
107 {USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */ 107 {USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */
108 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ 108 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
109 {USB_DEVICE(0x1737, 0x0078)}, /* Linksys WUSB100v2 */
109 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ 110 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
110 {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ 111 {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
111 {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */ 112 {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 32088a641eba..84be383abec3 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -128,12 +128,13 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
128 u8 *ptmpchar = NULL, *ppayload, *ptr; 128 u8 *ptmpchar = NULL, *ppayload, *ptr;
129 struct tx_desc *ptx_desc; 129 struct tx_desc *ptx_desc;
130 u32 txdscp_sz = sizeof(struct tx_desc); 130 u32 txdscp_sz = sizeof(struct tx_desc);
131 u8 ret = _FAIL;
131 132
132 ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw); 133 ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw);
133 if (pmappedfw && (ulfilelength > 0)) { 134 if (pmappedfw && (ulfilelength > 0)) {
134 update_fwhdr(&fwhdr, pmappedfw); 135 update_fwhdr(&fwhdr, pmappedfw);
135 if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL) 136 if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL)
136 goto exit_fail; 137 goto firmware_rel;
137 fill_fwpriv(padapter, &fwhdr.fwpriv); 138 fill_fwpriv(padapter, &fwhdr.fwpriv);
138 /* firmware check ok */ 139 /* firmware check ok */
139 maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ? 140 maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ?
@@ -141,7 +142,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
141 maxlen += txdscp_sz; 142 maxlen += txdscp_sz;
142 ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ); 143 ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ);
143 if (ptmpchar == NULL) 144 if (ptmpchar == NULL)
144 return _FAIL; 145 goto firmware_rel;
145 146
146 ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ - 147 ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ -
147 ((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1))); 148 ((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1)));
@@ -273,11 +274,13 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
273 goto exit_fail; 274 goto exit_fail;
274 } else 275 } else
275 goto exit_fail; 276 goto exit_fail;
276 return _SUCCESS; 277 ret = _SUCCESS;
277 278
278exit_fail: 279exit_fail:
279 kfree(ptmpchar); 280 kfree(ptmpchar);
280 return _FAIL; 281firmware_rel:
282 release_firmware((struct firmware *)phfwfile_hdl);
283 return ret;
281} 284}
282 285
283uint rtl8712_hal_init(struct _adapter *padapter) 286uint rtl8712_hal_init(struct _adapter *padapter)
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index a692ee88b9e9..21ce2af447b5 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -47,54 +47,123 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
47static void r871xu_dev_remove(struct usb_interface *pusb_intf); 47static void r871xu_dev_remove(struct usb_interface *pusb_intf);
48 48
49static struct usb_device_id rtl871x_usb_id_tbl[] = { 49static struct usb_device_id rtl871x_usb_id_tbl[] = {
50 /*92SU 50
51 * Realtek */ 51/* RTL8188SU */
52 {USB_DEVICE(0x0bda, 0x8171)}, 52 /* Realtek */
53 {USB_DEVICE(0x0bda, 0x8172)}, 53 {USB_DEVICE(0x0BDA, 0x8171)},
54 {USB_DEVICE(0x0bda, 0x8173)}, 54 {USB_DEVICE(0x0bda, 0x8173)},
55 {USB_DEVICE(0x0bda, 0x8174)},
56 {USB_DEVICE(0x0bda, 0x8712)}, 55 {USB_DEVICE(0x0bda, 0x8712)},
57 {USB_DEVICE(0x0bda, 0x8713)}, 56 {USB_DEVICE(0x0bda, 0x8713)},
58 {USB_DEVICE(0x0bda, 0xC512)}, 57 {USB_DEVICE(0x0bda, 0xC512)},
59 /* Abocom */ 58 /* Abocom */
60 {USB_DEVICE(0x07B8, 0x8188)}, 59 {USB_DEVICE(0x07B8, 0x8188)},
60 /* ASUS */
61 {USB_DEVICE(0x0B05, 0x1786)},
62 {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
63 /* Belkin */
64 {USB_DEVICE(0x050D, 0x945A)},
61 /* Corega */ 65 /* Corega */
62 {USB_DEVICE(0x07aa, 0x0047)}, 66 {USB_DEVICE(0x07AA, 0x0047)},
63 /* Dlink */ 67 /* D-Link */
64 {USB_DEVICE(0x07d1, 0x3303)}, 68 {USB_DEVICE(0x2001, 0x3306)},
65 {USB_DEVICE(0x07d1, 0x3302)}, 69 {USB_DEVICE(0x07D1, 0x3306)}, /* 11n mode disable */
66 {USB_DEVICE(0x07d1, 0x3300)}, 70 /* Edimax */
67 /* Dlink for Skyworth */ 71 {USB_DEVICE(0x7392, 0x7611)},
68 {USB_DEVICE(0x14b2, 0x3300)},
69 {USB_DEVICE(0x14b2, 0x3301)},
70 {USB_DEVICE(0x14b2, 0x3302)},
71 /* EnGenius */ 72 /* EnGenius */
72 {USB_DEVICE(0x1740, 0x9603)}, 73 {USB_DEVICE(0x1740, 0x9603)},
73 {USB_DEVICE(0x1740, 0x9605)}, 74 /* Hawking */
75 {USB_DEVICE(0x0E66, 0x0016)},
76 /* Hercules */
77 {USB_DEVICE(0x06F8, 0xE034)},
78 {USB_DEVICE(0x06F8, 0xE032)},
79 /* Logitec */
80 {USB_DEVICE(0x0789, 0x0167)},
81 /* PCI */
82 {USB_DEVICE(0x2019, 0xAB28)},
83 {USB_DEVICE(0x2019, 0xED16)},
84 /* Sitecom */
85 {USB_DEVICE(0x0DF6, 0x0057)},
86 {USB_DEVICE(0x0DF6, 0x0045)},
87 {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
88 {USB_DEVICE(0x0DF6, 0x004B)},
89 {USB_DEVICE(0x0DF6, 0x0063)},
90 /* Sweex */
91 {USB_DEVICE(0x177F, 0x0154)},
92 /* Thinkware */
93 {USB_DEVICE(0x0BDA, 0x5077)},
94 /* Toshiba */
95 {USB_DEVICE(0x1690, 0x0752)},
96 /* - */
97 {USB_DEVICE(0x20F4, 0x646B)},
98 {USB_DEVICE(0x083A, 0xC512)},
99
100/* RTL8191SU */
101 /* Realtek */
102 {USB_DEVICE(0x0BDA, 0x8172)},
103 /* Amigo */
104 {USB_DEVICE(0x0EB0, 0x9061)},
105 /* ASUS/EKB */
106 {USB_DEVICE(0x0BDA, 0x8172)},
107 {USB_DEVICE(0x13D3, 0x3323)},
108 {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
109 {USB_DEVICE(0x13D3, 0x3342)},
110 /* ASUS/EKBLenovo */
111 {USB_DEVICE(0x13D3, 0x3333)},
112 {USB_DEVICE(0x13D3, 0x3334)},
113 {USB_DEVICE(0x13D3, 0x3335)}, /* 11n mode disable */
114 {USB_DEVICE(0x13D3, 0x3336)}, /* 11n mode disable */
115 /* ASUS/Media BOX */
116 {USB_DEVICE(0x13D3, 0x3309)},
74 /* Belkin */ 117 /* Belkin */
75 {USB_DEVICE(0x050d, 0x815F)}, 118 {USB_DEVICE(0x050D, 0x815F)},
76 {USB_DEVICE(0x050d, 0x945A)}, 119 /* D-Link */
77 {USB_DEVICE(0x050d, 0x845A)}, 120 {USB_DEVICE(0x07D1, 0x3302)},
78 /* Guillemot */ 121 {USB_DEVICE(0x07D1, 0x3300)},
79 {USB_DEVICE(0x06f8, 0xe031)}, 122 {USB_DEVICE(0x07D1, 0x3303)},
80 /* Edimax */ 123 /* Edimax */
81 {USB_DEVICE(0x7392, 0x7611)},
82 {USB_DEVICE(0x7392, 0x7612)}, 124 {USB_DEVICE(0x7392, 0x7612)},
83 {USB_DEVICE(0x7392, 0x7622)}, 125 /* EnGenius */
84 /* Sitecom */ 126 {USB_DEVICE(0x1740, 0x9605)},
85 {USB_DEVICE(0x0DF6, 0x0045)}, 127 /* Guillemot */
128 {USB_DEVICE(0x06F8, 0xE031)},
86 /* Hawking */ 129 /* Hawking */
87 {USB_DEVICE(0x0E66, 0x0015)}, 130 {USB_DEVICE(0x0E66, 0x0015)},
88 {USB_DEVICE(0x0E66, 0x0016)}, 131 /* Mediao */
89 {USB_DEVICE(0x0b05, 0x1786)},
90 {USB_DEVICE(0x0b05, 0x1791)}, /* 11n mode disable */
91
92 {USB_DEVICE(0x13D3, 0x3306)}, 132 {USB_DEVICE(0x13D3, 0x3306)},
93 {USB_DEVICE(0x13D3, 0x3309)}, 133 /* PCI */
134 {USB_DEVICE(0x2019, 0xED18)},
135 {USB_DEVICE(0x2019, 0x4901)},
136 /* Sitecom */
137 {USB_DEVICE(0x0DF6, 0x0058)},
138 {USB_DEVICE(0x0DF6, 0x0049)},
139 {USB_DEVICE(0x0DF6, 0x004C)},
140 {USB_DEVICE(0x0DF6, 0x0064)},
141 /* Skyworth */
142 {USB_DEVICE(0x14b2, 0x3300)},
143 {USB_DEVICE(0x14b2, 0x3301)},
144 {USB_DEVICE(0x14B2, 0x3302)},
145 /* - */
146 {USB_DEVICE(0x04F2, 0xAFF2)},
147 {USB_DEVICE(0x04F2, 0xAFF5)},
148 {USB_DEVICE(0x04F2, 0xAFF6)},
149 {USB_DEVICE(0x13D3, 0x3339)},
150 {USB_DEVICE(0x13D3, 0x3340)}, /* 11n mode disable */
151 {USB_DEVICE(0x13D3, 0x3341)}, /* 11n mode disable */
94 {USB_DEVICE(0x13D3, 0x3310)}, 152 {USB_DEVICE(0x13D3, 0x3310)},
95 {USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
96 {USB_DEVICE(0x13D3, 0x3325)}, 153 {USB_DEVICE(0x13D3, 0x3325)},
97 {USB_DEVICE(0x083A, 0xC512)}, 154
155/* RTL8192SU */
156 /* Realtek */
157 {USB_DEVICE(0x0BDA, 0x8174)},
158 {USB_DEVICE(0x0BDA, 0x8174)},
159 /* Belkin */
160 {USB_DEVICE(0x050D, 0x845A)},
161 /* Corega */
162 {USB_DEVICE(0x07AA, 0x0051)},
163 /* Edimax */
164 {USB_DEVICE(0x7392, 0x7622)},
165 /* NEC */
166 {USB_DEVICE(0x0409, 0x02B6)},
98 {} 167 {}
99}; 168};
100 169
@@ -103,8 +172,20 @@ MODULE_DEVICE_TABLE(usb, rtl871x_usb_id_tbl);
103static struct specific_device_id specific_device_id_tbl[] = { 172static struct specific_device_id specific_device_id_tbl[] = {
104 {.idVendor = 0x0b05, .idProduct = 0x1791, 173 {.idVendor = 0x0b05, .idProduct = 0x1791,
105 .flags = SPEC_DEV_ID_DISABLE_HT}, 174 .flags = SPEC_DEV_ID_DISABLE_HT},
175 {.idVendor = 0x0df6, .idProduct = 0x0059,
176 .flags = SPEC_DEV_ID_DISABLE_HT},
177 {.idVendor = 0x13d3, .idProduct = 0x3306,
178 .flags = SPEC_DEV_ID_DISABLE_HT},
106 {.idVendor = 0x13D3, .idProduct = 0x3311, 179 {.idVendor = 0x13D3, .idProduct = 0x3311,
107 .flags = SPEC_DEV_ID_DISABLE_HT}, 180 .flags = SPEC_DEV_ID_DISABLE_HT},
181 {.idVendor = 0x13d3, .idProduct = 0x3335,
182 .flags = SPEC_DEV_ID_DISABLE_HT},
183 {.idVendor = 0x13d3, .idProduct = 0x3336,
184 .flags = SPEC_DEV_ID_DISABLE_HT},
185 {.idVendor = 0x13d3, .idProduct = 0x3340,
186 .flags = SPEC_DEV_ID_DISABLE_HT},
187 {.idVendor = 0x13d3, .idProduct = 0x3341,
188 .flags = SPEC_DEV_ID_DISABLE_HT},
108 {} 189 {}
109}; 190};
110 191
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index f4b163f7338a..d007e4a12c14 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -1044,9 +1044,9 @@ static int __maybe_unused smtcfb_suspend(struct pci_dev *pdev, pm_message_t msg)
1044 1044
1045 /* when doing suspend, call fb apis and pci apis */ 1045 /* when doing suspend, call fb apis and pci apis */
1046 if (msg.event == PM_EVENT_SUSPEND) { 1046 if (msg.event == PM_EVENT_SUSPEND) {
1047 acquire_console_sem(); 1047 console_lock();
1048 fb_set_suspend(&sfb->fb, 1); 1048 fb_set_suspend(&sfb->fb, 1);
1049 release_console_sem(); 1049 console_unlock();
1050 retv = pci_save_state(pdev); 1050 retv = pci_save_state(pdev);
1051 pci_disable_device(pdev); 1051 pci_disable_device(pdev);
1052 retv = pci_choose_state(pdev, msg); 1052 retv = pci_choose_state(pdev, msg);
@@ -1071,7 +1071,7 @@ static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
1071 /* when resuming, restore pci data and fb cursor */ 1071 /* when resuming, restore pci data and fb cursor */
1072 if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) { 1072 if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
1073 retv = pci_set_power_state(pdev, PCI_D0); 1073 retv = pci_set_power_state(pdev, PCI_D0);
1074 retv = pci_restore_state(pdev); 1074 pci_restore_state(pdev);
1075 if (pci_enable_device(pdev)) 1075 if (pci_enable_device(pdev))
1076 return -1; 1076 return -1;
1077 pci_set_master(pdev); 1077 pci_set_master(pdev);
@@ -1105,9 +1105,9 @@ static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
1105 1105
1106 smtcfb_setmode(sfb); 1106 smtcfb_setmode(sfb);
1107 1107
1108 acquire_console_sem(); 1108 console_lock();
1109 fb_set_suspend(&sfb->fb, 0); 1109 fb_set_suspend(&sfb->fb, 0);
1110 release_console_sem(); 1110 console_unlock();
1111 1111
1112 return 0; 1112 return 0;
1113} 1113}
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index 87a3a9bd5842..f204d33910ec 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -283,7 +283,7 @@ static int smb_compare_dentry(const struct dentry *,
283 unsigned int, const char *, const struct qstr *); 283 unsigned int, const char *, const struct qstr *);
284static int smb_delete_dentry(const struct dentry *); 284static int smb_delete_dentry(const struct dentry *);
285 285
286static const struct dentry_operations smbfs_dentry_operations = 286const struct dentry_operations smbfs_dentry_operations =
287{ 287{
288 .d_revalidate = smb_lookup_validate, 288 .d_revalidate = smb_lookup_validate,
289 .d_hash = smb_hash_dentry, 289 .d_hash = smb_hash_dentry,
@@ -291,7 +291,7 @@ static const struct dentry_operations smbfs_dentry_operations =
291 .d_delete = smb_delete_dentry, 291 .d_delete = smb_delete_dentry,
292}; 292};
293 293
294static const struct dentry_operations smbfs_dentry_operations_case = 294const struct dentry_operations smbfs_dentry_operations_case =
295{ 295{
296 .d_revalidate = smb_lookup_validate, 296 .d_revalidate = smb_lookup_validate,
297 .d_delete = smb_delete_dentry, 297 .d_delete = smb_delete_dentry,
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 408bb9b3303e..07a7f5432597 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -332,7 +332,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
332 unsigned long flags; 332 unsigned long flags;
333 333
334 len = strlen(buf); 334 len = strlen(buf);
335 if (len > 0 || len < 3) { 335 if (len > 0 && len < 3) {
336 ch = buf[0]; 336 ch = buf[0];
337 if (ch == '\n') 337 if (ch == '\n')
338 ch = '0'; 338 ch = '0';
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index e8f047e86a32..80183a7e6624 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -986,12 +986,6 @@ static int __devinit synaptics_rmi4_probe
986 input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0, 986 input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
987 MAX_TOUCH_MAJOR, 0, 0); 987 MAX_TOUCH_MAJOR, 0, 0);
988 988
989 retval = input_register_device(rmi4_data->input_dev);
990 if (retval) {
991 dev_err(&client->dev, "%s:input register failed\n", __func__);
992 goto err_input_register;
993 }
994
995 /* Clear interrupts */ 989 /* Clear interrupts */
996 synaptics_rmi4_i2c_block_read(rmi4_data, 990 synaptics_rmi4_i2c_block_read(rmi4_data,
997 rmi4_data->fn01_data_base_addr + 1, intr_status, 991 rmi4_data->fn01_data_base_addr + 1, intr_status,
@@ -1003,15 +997,20 @@ static int __devinit synaptics_rmi4_probe
1003 if (retval) { 997 if (retval) {
1004 dev_err(&client->dev, "%s:Unable to get attn irq %d\n", 998 dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
1005 __func__, platformdata->irq_number); 999 __func__, platformdata->irq_number);
1006 goto err_request_irq; 1000 goto err_unset_clientdata;
1001 }
1002
1003 retval = input_register_device(rmi4_data->input_dev);
1004 if (retval) {
1005 dev_err(&client->dev, "%s:input register failed\n", __func__);
1006 goto err_free_irq;
1007 } 1007 }
1008 1008
1009 return retval; 1009 return retval;
1010 1010
1011err_request_irq: 1011err_free_irq:
1012 free_irq(platformdata->irq_number, rmi4_data); 1012 free_irq(platformdata->irq_number, rmi4_data);
1013 input_unregister_device(rmi4_data->input_dev); 1013err_unset_clientdata:
1014err_input_register:
1015 i2c_set_clientdata(client, NULL); 1014 i2c_set_clientdata(client, NULL);
1016err_query_dev: 1015err_query_dev:
1017 if (platformdata->regulator_en) { 1016 if (platformdata->regulator_en) {
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 571864555ddd..27e0aa81a584 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -949,7 +949,7 @@ func_end:
949 * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then 949 * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
950 * schedules a DPC to dispatch I/O. 950 * schedules a DPC to dispatch I/O.
951 */ 951 */
952void io_mbox_msg(u32 msg) 952int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
953{ 953{
954 struct io_mgr *pio_mgr; 954 struct io_mgr *pio_mgr;
955 struct dev_object *dev_obj; 955 struct dev_object *dev_obj;
@@ -959,9 +959,9 @@ void io_mbox_msg(u32 msg)
959 dev_get_io_mgr(dev_obj, &pio_mgr); 959 dev_get_io_mgr(dev_obj, &pio_mgr);
960 960
961 if (!pio_mgr) 961 if (!pio_mgr)
962 return; 962 return NOTIFY_BAD;
963 963
964 pio_mgr->intr_val = (u16)msg; 964 pio_mgr->intr_val = (u16)((u32)msg);
965 if (pio_mgr->intr_val & MBX_PM_CLASS) 965 if (pio_mgr->intr_val & MBX_PM_CLASS)
966 io_dispatch_pm(pio_mgr); 966 io_dispatch_pm(pio_mgr);
967 967
@@ -973,7 +973,7 @@ void io_mbox_msg(u32 msg)
973 spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags); 973 spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
974 tasklet_schedule(&pio_mgr->dpc_tasklet); 974 tasklet_schedule(&pio_mgr->dpc_tasklet);
975 } 975 }
976 return; 976 return NOTIFY_OK;
977} 977}
978 978
979/* 979/*
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index a3b0a183d570..a3f69f6f505f 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -223,6 +223,10 @@ static struct bridge_drv_interface drv_interface_fxns = {
223 bridge_msg_set_queue_id, 223 bridge_msg_set_queue_id,
224}; 224};
225 225
226static struct notifier_block dsp_mbox_notifier = {
227 .notifier_call = io_mbox_msg,
228};
229
226static inline void flush_all(struct bridge_dev_context *dev_context) 230static inline void flush_all(struct bridge_dev_context *dev_context)
227{ 231{
228 if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION || 232 if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
@@ -553,7 +557,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
553 * Enable Mailbox events and also drain any pending 557 * Enable Mailbox events and also drain any pending
554 * stale messages. 558 * stale messages.
555 */ 559 */
556 dev_context->mbox = omap_mbox_get("dsp"); 560 dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
557 if (IS_ERR(dev_context->mbox)) { 561 if (IS_ERR(dev_context->mbox)) {
558 dev_context->mbox = NULL; 562 dev_context->mbox = NULL;
559 pr_err("%s: Failed to get dsp mailbox handle\n", 563 pr_err("%s: Failed to get dsp mailbox handle\n",
@@ -563,8 +567,6 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
563 567
564 } 568 }
565 if (!status) { 569 if (!status) {
566 dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
567
568/*PM_IVA2GRPSEL_PER = 0xC0;*/ 570/*PM_IVA2GRPSEL_PER = 0xC0;*/
569 temp = readl(resources->dw_per_pm_base + 0xA8); 571 temp = readl(resources->dw_per_pm_base + 0xA8);
570 temp = (temp & 0xFFFFFF30) | 0xC0; 572 temp = (temp & 0xFFFFFF30) | 0xC0;
@@ -685,7 +687,7 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
685 /* Disable the mailbox interrupts */ 687 /* Disable the mailbox interrupts */
686 if (dev_context->mbox) { 688 if (dev_context->mbox) {
687 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); 689 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
688 omap_mbox_put(dev_context->mbox); 690 omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
689 dev_context->mbox = NULL; 691 dev_context->mbox = NULL;
690 } 692 }
691 /* Reset IVA2 clocks*/ 693 /* Reset IVA2 clocks*/
@@ -786,10 +788,7 @@ static int bridge_dev_create(struct bridge_dev_context
786 788
787 pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL); 789 pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
788 if (pt_attrs != NULL) { 790 if (pt_attrs != NULL) {
789 /* Assuming that we use only DSP's memory map 791 pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */
790 * until 0x4000:0000 , we would need only 1024
791 * L1 enties i.e L1 size = 4K */
792 pt_attrs->l1_size = 0x1000;
793 align_size = pt_attrs->l1_size; 792 align_size = pt_attrs->l1_size;
794 /* Align sizes are expected to be power of 2 */ 793 /* Align sizes are expected to be power of 2 */
795 /* we like to get aligned on L1 table size */ 794 /* we like to get aligned on L1 table size */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index 18aec55d8647..8242c70e09dd 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -72,22 +72,17 @@ extern void io_dpc(unsigned long ref_data);
72/* 72/*
73 * ======== io_mbox_msg ======== 73 * ======== io_mbox_msg ========
74 * Purpose: 74 * Purpose:
75 * Main interrupt handler for the shared memory Bridge channel manager. 75 * Main message handler for the shared memory Bridge channel manager.
76 * Calls the Bridge's chnlsm_isr to determine if this interrupt is ours, 76 * Determine if this message is ours, then schedules a DPC to
77 * then schedules a DPC to dispatch I/O. 77 * dispatch I/O.
78 * Parameters: 78 * Parameters:
79 * ref_data: Pointer to the channel manager object for this board. 79 * self: Pointer to its own notifier_block struct.
80 * Set in an initial call to ISR_Install(). 80 * len: Length of message.
81 * msg: Message code received.
81 * Returns: 82 * Returns:
82 * TRUE if interrupt handled; FALSE otherwise. 83 * NOTIFY_OK if handled; NOTIFY_BAD otherwise.
83 * Requires:
84 * Must be in locked memory if executing in kernel mode.
85 * Must only call functions which are in locked memory if Kernel mode.
86 * Must only call asynchronous services.
87 * Interrupts are disabled and EOI for this interrupt has been sent.
88 * Ensures:
89 */ 84 */
90void io_mbox_msg(u32 msg); 85int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg);
91 86
92/* 87/*
93 * ======== io_request_chnl ======== 88 * ======== io_request_chnl ========
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
index 8fe017c3721f..eb9b9f1bc138 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -1450,29 +1450,55 @@ static struct video_device tm6000_template = {
1450 * ------------------------------------------------------------------ 1450 * ------------------------------------------------------------------
1451 */ 1451 */
1452 1452
1453int tm6000_v4l2_register(struct tm6000_core *dev) 1453static struct video_device *vdev_init(struct tm6000_core *dev,
1454 const struct video_device
1455 *template, const char *type_name)
1454{ 1456{
1455 int ret = -1;
1456 struct video_device *vfd; 1457 struct video_device *vfd;
1457 1458
1458 vfd = video_device_alloc(); 1459 vfd = video_device_alloc();
1459 if(!vfd) { 1460 if (NULL == vfd)
1461 return NULL;
1462
1463 *vfd = *template;
1464 vfd->v4l2_dev = &dev->v4l2_dev;
1465 vfd->release = video_device_release;
1466 vfd->debug = tm6000_debug;
1467 vfd->lock = &dev->lock;
1468
1469 snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
1470
1471 video_set_drvdata(vfd, dev);
1472 return vfd;
1473}
1474
1475int tm6000_v4l2_register(struct tm6000_core *dev)
1476{
1477 int ret = -1;
1478
1479 dev->vfd = vdev_init(dev, &tm6000_template, "video");
1480
1481 if (!dev->vfd) {
1482 printk(KERN_INFO "%s: can't register video device\n",
1483 dev->name);
1460 return -ENOMEM; 1484 return -ENOMEM;
1461 } 1485 }
1462 dev->vfd = vfd;
1463 1486
1464 /* init video dma queues */ 1487 /* init video dma queues */
1465 INIT_LIST_HEAD(&dev->vidq.active); 1488 INIT_LIST_HEAD(&dev->vidq.active);
1466 INIT_LIST_HEAD(&dev->vidq.queued); 1489 INIT_LIST_HEAD(&dev->vidq.queued);
1467 1490
1468 memcpy(dev->vfd, &tm6000_template, sizeof(*(dev->vfd))); 1491 ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
1469 dev->vfd->debug = tm6000_debug;
1470 dev->vfd->lock = &dev->lock;
1471 1492
1472 vfd->v4l2_dev = &dev->v4l2_dev; 1493 if (ret < 0) {
1473 video_set_drvdata(vfd, dev); 1494 printk(KERN_INFO "%s: can't register video device\n",
1495 dev->name);
1496 return ret;
1497 }
1498
1499 printk(KERN_INFO "%s: registered device %s\n",
1500 dev->name, video_device_node_name(dev->vfd));
1474 1501
1475 ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
1476 printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret); 1502 printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
1477 return ret; 1503 return ret;
1478} 1504}
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index 30dbfb6d16f2..d73267961ef4 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -32,6 +32,7 @@
32 32
33struct stub_device { 33struct stub_device {
34 struct usb_interface *interface; 34 struct usb_interface *interface;
35 struct usb_device *udev;
35 struct list_head list; 36 struct list_head list;
36 37
37 struct usbip_device ud; 38 struct usbip_device ud;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index b186b5fed2b9..a7ce51cc8909 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -258,10 +258,11 @@ static void stub_shutdown_connection(struct usbip_device *ud)
258static void stub_device_reset(struct usbip_device *ud) 258static void stub_device_reset(struct usbip_device *ud)
259{ 259{
260 struct stub_device *sdev = container_of(ud, struct stub_device, ud); 260 struct stub_device *sdev = container_of(ud, struct stub_device, ud);
261 struct usb_device *udev = interface_to_usbdev(sdev->interface); 261 struct usb_device *udev = sdev->udev;
262 int ret; 262 int ret;
263 263
264 usbip_udbg("device reset"); 264 usbip_udbg("device reset");
265
265 ret = usb_lock_device_for_reset(udev, sdev->interface); 266 ret = usb_lock_device_for_reset(udev, sdev->interface);
266 if (ret < 0) { 267 if (ret < 0) {
267 dev_err(&udev->dev, "lock for reset\n"); 268 dev_err(&udev->dev, "lock for reset\n");
@@ -309,7 +310,8 @@ static void stub_device_unusable(struct usbip_device *ud)
309 * 310 *
310 * Allocates and initializes a new stub_device struct. 311 * Allocates and initializes a new stub_device struct.
311 */ 312 */
312static struct stub_device *stub_device_alloc(struct usb_interface *interface) 313static struct stub_device *stub_device_alloc(struct usb_device *udev,
314 struct usb_interface *interface)
313{ 315{
314 struct stub_device *sdev; 316 struct stub_device *sdev;
315 int busnum = interface_to_busnum(interface); 317 int busnum = interface_to_busnum(interface);
@@ -324,7 +326,8 @@ static struct stub_device *stub_device_alloc(struct usb_interface *interface)
324 return NULL; 326 return NULL;
325 } 327 }
326 328
327 sdev->interface = interface; 329 sdev->interface = usb_get_intf(interface);
330 sdev->udev = usb_get_dev(udev);
328 331
329 /* 332 /*
330 * devid is defined with devnum when this driver is first allocated. 333 * devid is defined with devnum when this driver is first allocated.
@@ -450,11 +453,12 @@ static int stub_probe(struct usb_interface *interface,
450 return err; 453 return err;
451 } 454 }
452 455
456 usb_get_intf(interface);
453 return 0; 457 return 0;
454 } 458 }
455 459
456 /* ok. this is my device. */ 460 /* ok. this is my device. */
457 sdev = stub_device_alloc(interface); 461 sdev = stub_device_alloc(udev, interface);
458 if (!sdev) 462 if (!sdev)
459 return -ENOMEM; 463 return -ENOMEM;
460 464
@@ -476,6 +480,8 @@ static int stub_probe(struct usb_interface *interface,
476 dev_err(&interface->dev, "create sysfs files for %s\n", 480 dev_err(&interface->dev, "create sysfs files for %s\n",
477 udev_busid); 481 udev_busid);
478 usb_set_intfdata(interface, NULL); 482 usb_set_intfdata(interface, NULL);
483 usb_put_intf(interface);
484
479 busid_priv->interf_count = 0; 485 busid_priv->interf_count = 0;
480 486
481 busid_priv->sdev = NULL; 487 busid_priv->sdev = NULL;
@@ -545,6 +551,7 @@ static void stub_disconnect(struct usb_interface *interface)
545 if (busid_priv->interf_count > 1) { 551 if (busid_priv->interf_count > 1) {
546 busid_priv->interf_count--; 552 busid_priv->interf_count--;
547 shutdown_busid(busid_priv); 553 shutdown_busid(busid_priv);
554 usb_put_intf(interface);
548 return; 555 return;
549 } 556 }
550 557
@@ -554,6 +561,9 @@ static void stub_disconnect(struct usb_interface *interface)
554 /* 1. shutdown the current connection */ 561 /* 1. shutdown the current connection */
555 shutdown_busid(busid_priv); 562 shutdown_busid(busid_priv);
556 563
564 usb_put_dev(sdev->udev);
565 usb_put_intf(interface);
566
557 /* 3. free sdev */ 567 /* 3. free sdev */
558 busid_priv->sdev = NULL; 568 busid_priv->sdev = NULL;
559 stub_device_free(sdev); 569 stub_device_free(sdev);
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 3de6fd2539dc..ae6ac82754a4 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -364,7 +364,7 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
364 364
365static int get_pipe(struct stub_device *sdev, int epnum, int dir) 365static int get_pipe(struct stub_device *sdev, int epnum, int dir)
366{ 366{
367 struct usb_device *udev = interface_to_usbdev(sdev->interface); 367 struct usb_device *udev = sdev->udev;
368 struct usb_host_endpoint *ep; 368 struct usb_host_endpoint *ep;
369 struct usb_endpoint_descriptor *epd = NULL; 369 struct usb_endpoint_descriptor *epd = NULL;
370 370
@@ -484,7 +484,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
484 int ret; 484 int ret;
485 struct stub_priv *priv; 485 struct stub_priv *priv;
486 struct usbip_device *ud = &sdev->ud; 486 struct usbip_device *ud = &sdev->ud;
487 struct usb_device *udev = interface_to_usbdev(sdev->interface); 487 struct usb_device *udev = sdev->udev;
488 int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); 488 int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
489 489
490 490
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
index 41a1fe5138f4..afc3b1a71881 100644
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -100,9 +100,6 @@ struct vhci_hcd {
100 * But, the index of this array begins from 0. 100 * But, the index of this array begins from 0.
101 */ 101 */
102 struct vhci_device vdev[VHCI_NPORTS]; 102 struct vhci_device vdev[VHCI_NPORTS];
103
104 /* vhci_device which has not been assiged its address yet */
105 int pending_port;
106}; 103};
107 104
108 105
@@ -119,6 +116,9 @@ void rh_port_disconnect(int rhport);
119void vhci_rx_loop(struct usbip_task *ut); 116void vhci_rx_loop(struct usbip_task *ut);
120void vhci_tx_loop(struct usbip_task *ut); 117void vhci_tx_loop(struct usbip_task *ut);
121 118
119struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
120 __u32 seqnum);
121
122#define hardware (&the_controller->pdev.dev) 122#define hardware (&the_controller->pdev.dev)
123 123
124static inline struct vhci_device *port_to_vdev(__u32 port) 124static inline struct vhci_device *port_to_vdev(__u32 port)
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 08bd26a245d5..a35fe61268de 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -138,8 +138,6 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
138 * the_controller->vdev[rhport].ud.status = VDEV_CONNECT; 138 * the_controller->vdev[rhport].ud.status = VDEV_CONNECT;
139 * spin_unlock(&the_controller->vdev[rhport].ud.lock); */ 139 * spin_unlock(&the_controller->vdev[rhport].ud.lock); */
140 140
141 the_controller->pending_port = rhport;
142
143 spin_unlock_irqrestore(&the_controller->lock, flags); 141 spin_unlock_irqrestore(&the_controller->lock, flags);
144 142
145 usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); 143 usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
@@ -559,6 +557,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
559 struct device *dev = &urb->dev->dev; 557 struct device *dev = &urb->dev->dev;
560 int ret = 0; 558 int ret = 0;
561 unsigned long flags; 559 unsigned long flags;
560 struct vhci_device *vdev;
562 561
563 usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n", 562 usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
564 hcd, urb, mem_flags); 563 hcd, urb, mem_flags);
@@ -574,6 +573,18 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
574 return urb->status; 573 return urb->status;
575 } 574 }
576 575
576 vdev = port_to_vdev(urb->dev->portnum-1);
577
578 /* refuse enqueue for dead connection */
579 spin_lock(&vdev->ud.lock);
580 if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) {
581 usbip_uerr("enqueue for inactive port %d\n", vdev->rhport);
582 spin_unlock(&vdev->ud.lock);
583 spin_unlock_irqrestore(&the_controller->lock, flags);
584 return -ENODEV;
585 }
586 spin_unlock(&vdev->ud.lock);
587
577 ret = usb_hcd_link_urb_to_ep(hcd, urb); 588 ret = usb_hcd_link_urb_to_ep(hcd, urb);
578 if (ret) 589 if (ret)
579 goto no_need_unlink; 590 goto no_need_unlink;
@@ -592,8 +603,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
592 __u8 type = usb_pipetype(urb->pipe); 603 __u8 type = usb_pipetype(urb->pipe);
593 struct usb_ctrlrequest *ctrlreq = 604 struct usb_ctrlrequest *ctrlreq =
594 (struct usb_ctrlrequest *) urb->setup_packet; 605 (struct usb_ctrlrequest *) urb->setup_packet;
595 struct vhci_device *vdev =
596 port_to_vdev(the_controller->pending_port);
597 606
598 if (type != PIPE_CONTROL || !ctrlreq) { 607 if (type != PIPE_CONTROL || !ctrlreq) {
599 dev_err(dev, "invalid request to devnum 0\n"); 608 dev_err(dev, "invalid request to devnum 0\n");
@@ -607,7 +616,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
607 dev_info(dev, "SetAddress Request (%d) to port %d\n", 616 dev_info(dev, "SetAddress Request (%d) to port %d\n",
608 ctrlreq->wValue, vdev->rhport); 617 ctrlreq->wValue, vdev->rhport);
609 618
610 vdev->udev = urb->dev; 619 if (vdev->udev)
620 usb_put_dev(vdev->udev);
621 vdev->udev = usb_get_dev(urb->dev);
611 622
612 spin_lock(&vdev->ud.lock); 623 spin_lock(&vdev->ud.lock);
613 vdev->ud.status = VDEV_ST_USED; 624 vdev->ud.status = VDEV_ST_USED;
@@ -627,8 +638,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
627 "Get_Descriptor to device 0 " 638 "Get_Descriptor to device 0 "
628 "(get max pipe size)\n"); 639 "(get max pipe size)\n");
629 640
630 /* FIXME: reference count? (usb_get_dev()) */ 641 if (vdev->udev)
631 vdev->udev = urb->dev; 642 usb_put_dev(vdev->udev);
643 vdev->udev = usb_get_dev(urb->dev);
632 goto out; 644 goto out;
633 645
634 default: 646 default:
@@ -805,7 +817,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
805 return 0; 817 return 0;
806} 818}
807 819
808
809static void vhci_device_unlink_cleanup(struct vhci_device *vdev) 820static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
810{ 821{
811 struct vhci_unlink *unlink, *tmp; 822 struct vhci_unlink *unlink, *tmp;
@@ -813,11 +824,34 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
813 spin_lock(&vdev->priv_lock); 824 spin_lock(&vdev->priv_lock);
814 825
815 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { 826 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
827 usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
816 list_del(&unlink->list); 828 list_del(&unlink->list);
817 kfree(unlink); 829 kfree(unlink);
818 } 830 }
819 831
820 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) { 832 list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
833 struct urb *urb;
834
835 /* give back URB of unanswered unlink request */
836 usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum);
837
838 urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
839 if (!urb) {
840 usbip_uinfo("the urb (seqnum %lu) was already given back\n",
841 unlink->unlink_seqnum);
842 list_del(&unlink->list);
843 kfree(unlink);
844 continue;
845 }
846
847 urb->status = -ENODEV;
848
849 spin_lock(&the_controller->lock);
850 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
851 spin_unlock(&the_controller->lock);
852
853 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
854
821 list_del(&unlink->list); 855 list_del(&unlink->list);
822 kfree(unlink); 856 kfree(unlink);
823 } 857 }
@@ -887,6 +921,10 @@ static void vhci_device_reset(struct usbip_device *ud)
887 vdev->speed = 0; 921 vdev->speed = 0;
888 vdev->devid = 0; 922 vdev->devid = 0;
889 923
924 if (vdev->udev)
925 usb_put_dev(vdev->udev);
926 vdev->udev = NULL;
927
890 ud->tcp_socket = NULL; 928 ud->tcp_socket = NULL;
891 929
892 ud->status = VDEV_ST_NULL; 930 ud->status = VDEV_ST_NULL;
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 8147d7202b2d..bf6991470941 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -23,16 +23,14 @@
23#include "vhci.h" 23#include "vhci.h"
24 24
25 25
26/* get URB from transmitted urb queue */ 26/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
27static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, 27struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
28 __u32 seqnum) 28 __u32 seqnum)
29{ 29{
30 struct vhci_priv *priv, *tmp; 30 struct vhci_priv *priv, *tmp;
31 struct urb *urb = NULL; 31 struct urb *urb = NULL;
32 int status; 32 int status;
33 33
34 spin_lock(&vdev->priv_lock);
35
36 list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) { 34 list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
37 if (priv->seqnum == seqnum) { 35 if (priv->seqnum == seqnum) {
38 urb = priv->urb; 36 urb = priv->urb;
@@ -63,8 +61,6 @@ static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
63 } 61 }
64 } 62 }
65 63
66 spin_unlock(&vdev->priv_lock);
67
68 return urb; 64 return urb;
69} 65}
70 66
@@ -74,9 +70,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
74 struct usbip_device *ud = &vdev->ud; 70 struct usbip_device *ud = &vdev->ud;
75 struct urb *urb; 71 struct urb *urb;
76 72
73 spin_lock(&vdev->priv_lock);
77 74
78 urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); 75 urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
79 76
77 spin_unlock(&vdev->priv_lock);
80 78
81 if (!urb) { 79 if (!urb) {
82 usbip_uerr("cannot find a urb of seqnum %u\n", 80 usbip_uerr("cannot find a urb of seqnum %u\n",
@@ -161,7 +159,12 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
161 return; 159 return;
162 } 160 }
163 161
162 spin_lock(&vdev->priv_lock);
163
164 urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); 164 urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
165
166 spin_unlock(&vdev->priv_lock);
167
165 if (!urb) { 168 if (!urb) {
166 /* 169 /*
167 * I get the result of a unlink request. But, it seems that I 170 * I get the result of a unlink request. But, it seems that I
@@ -190,6 +193,19 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
190 return; 193 return;
191} 194}
192 195
196static int vhci_priv_tx_empty(struct vhci_device *vdev)
197{
198 int empty = 0;
199
200 spin_lock(&vdev->priv_lock);
201
202 empty = list_empty(&vdev->priv_rx);
203
204 spin_unlock(&vdev->priv_lock);
205
206 return empty;
207}
208
193/* recv a pdu */ 209/* recv a pdu */
194static void vhci_rx_pdu(struct usbip_device *ud) 210static void vhci_rx_pdu(struct usbip_device *ud)
195{ 211{
@@ -202,11 +218,29 @@ static void vhci_rx_pdu(struct usbip_device *ud)
202 218
203 memset(&pdu, 0, sizeof(pdu)); 219 memset(&pdu, 0, sizeof(pdu));
204 220
205
206 /* 1. receive a pdu header */ 221 /* 1. receive a pdu header */
207 ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0); 222 ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
223 if (ret < 0) {
224 if (ret == -ECONNRESET)
225 usbip_uinfo("connection reset by peer\n");
226 else if (ret == -EAGAIN) {
227 /* ignore if connection was idle */
228 if (vhci_priv_tx_empty(vdev))
229 return;
230 usbip_uinfo("connection timed out with pending urbs\n");
231 } else if (ret != -ERESTARTSYS)
232 usbip_uinfo("xmit failed %d\n", ret);
233
234 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
235 return;
236 }
237 if (ret == 0) {
238 usbip_uinfo("connection closed");
239 usbip_event_add(ud, VDEV_EVENT_DOWN);
240 return;
241 }
208 if (ret != sizeof(pdu)) { 242 if (ret != sizeof(pdu)) {
209 usbip_uerr("receiving pdu failed! size is %d, should be %d\n", 243 usbip_uerr("received pdu size is %d, should be %d\n",
210 ret, (unsigned int)sizeof(pdu)); 244 ret, (unsigned int)sizeof(pdu));
211 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); 245 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
212 return; 246 return;
diff --git a/drivers/staging/vme/bridges/Module.symvers b/drivers/staging/vme/bridges/Module.symvers
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/staging/vme/bridges/Module.symvers
+++ /dev/null
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 7016fdd2509f..e19b932492e1 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -3954,8 +3954,8 @@ void XGI_GetCRT2ResInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
3954unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo) 3954unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
3955{ 3955{
3956 3956
3957 if ((((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA)) 3957 if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
3958 && (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */ 3958 (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
3959 return 1; 3959 return 1;
3960 3960
3961 return 0; 3961 return 0;
@@ -8773,7 +8773,7 @@ unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
8773 8773
8774 if (pVBInfo->IF_DEF_LVDS == 0) { 8774 if (pVBInfo->IF_DEF_LVDS == 0) {
8775 CRT2Index = CRT2Index >> 6; /* for LCD */ 8775 CRT2Index = CRT2Index >> 6; /* for LCD */
8776 if (((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA)) { /*301b*/ 8776 if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
8777 if (pVBInfo->LCDResInfo != Panel1024x768) 8777 if (pVBInfo->LCDResInfo != Panel1024x768)
8778 VCLKIndex = LCDXlat2VCLK[CRT2Index]; 8778 VCLKIndex = LCDXlat2VCLK[CRT2Index];
8779 else 8779 else
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 5415712f01f8..4bd8cbdaee76 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -227,6 +227,7 @@ static int zram_read(struct zram *zram, struct bio *bio)
227 227
228 if (zram_test_flag(zram, index, ZRAM_ZERO)) { 228 if (zram_test_flag(zram, index, ZRAM_ZERO)) {
229 handle_zero_page(page); 229 handle_zero_page(page);
230 index++;
230 continue; 231 continue;
231 } 232 }
232 233
@@ -235,12 +236,14 @@ static int zram_read(struct zram *zram, struct bio *bio)
235 pr_debug("Read before write: sector=%lu, size=%u", 236 pr_debug("Read before write: sector=%lu, size=%u",
236 (ulong)(bio->bi_sector), bio->bi_size); 237 (ulong)(bio->bi_sector), bio->bi_size);
237 /* Do nothing */ 238 /* Do nothing */
239 index++;
238 continue; 240 continue;
239 } 241 }
240 242
241 /* Page is stored uncompressed since it's incompressible */ 243 /* Page is stored uncompressed since it's incompressible */
242 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { 244 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
243 handle_uncompressed_page(zram, page, index); 245 handle_uncompressed_page(zram, page, index);
246 index++;
244 continue; 247 continue;
245 } 248 }
246 249
@@ -320,6 +323,7 @@ static int zram_write(struct zram *zram, struct bio *bio)
320 mutex_unlock(&zram->lock); 323 mutex_unlock(&zram->lock);
321 zram_stat_inc(&zram->stats.pages_zero); 324 zram_stat_inc(&zram->stats.pages_zero);
322 zram_set_flag(zram, index, ZRAM_ZERO); 325 zram_set_flag(zram, index, ZRAM_ZERO);
326 index++;
323 continue; 327 continue;
324 } 328 }
325 329
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 000000000000..2fac3be209ac
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,32 @@
1
2menuconfig TARGET_CORE
3 tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
4 depends on SCSI && BLOCK
5 select CONFIGFS_FS
6 default n
7 help
8 Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
9 control path for target_core_mod. This includes built-in TCM RAMDISK
10 subsystem logic for virtual LUN 0 access
11
12if TARGET_CORE
13
14config TCM_IBLOCK
15 tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
16 help
17 Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
18 access to Linux/Block devices using BIO
19
20config TCM_FILEIO
21 tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
22 help
23 Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
24 access to Linux/VFS struct file or struct block_device
25
26config TCM_PSCSI
27 tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
28 help
29 Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
30 passthrough access to Linux/SCSI device
31
32endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 000000000000..5cfd70819f08
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,24 @@
1EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
2
3target_core_mod-y := target_core_configfs.o \
4 target_core_device.o \
5 target_core_fabric_configfs.o \
6 target_core_fabric_lib.o \
7 target_core_hba.o \
8 target_core_pr.o \
9 target_core_alua.o \
10 target_core_scdb.o \
11 target_core_tmr.o \
12 target_core_tpg.o \
13 target_core_transport.o \
14 target_core_cdb.o \
15 target_core_ua.o \
16 target_core_rd.o \
17 target_core_mib.o
18
19obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
20
21# Subsystem modules
22obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
23obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
24obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 000000000000..2c5fcfed5934
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,1991 @@
1/*******************************************************************************
2 * Filename: target_core_alua.c
3 *
4 * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA)
5 *
6 * Copyright (c) 2009-2010 Rising Tide Systems
7 * Copyright (c) 2009-2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27#include <linux/version.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <linux/configfs.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33
34#include <target/target_core_base.h>
35#include <target/target_core_device.h>
36#include <target/target_core_transport.h>
37#include <target/target_core_fabric_ops.h>
38#include <target/target_core_configfs.h>
39
40#include "target_core_alua.h"
41#include "target_core_hba.h"
42#include "target_core_ua.h"
43
44static int core_alua_check_transition(int state, int *primary);
45static int core_alua_set_tg_pt_secondary_state(
46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
47 struct se_port *port, int explict, int offline);
48
49/*
50 * REPORT_TARGET_PORT_GROUPS
51 *
52 * See spc4r17 section 6.27
53 */
54int core_emulate_report_target_port_groups(struct se_cmd *cmd)
55{
56 struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
57 struct se_port *port;
58 struct t10_alua_tg_pt_gp *tg_pt_gp;
59 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
60 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
61 u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
62 Target port group descriptor */
63
64 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
65 list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
66 tg_pt_gp_list) {
67 /*
68 * PREF: Preferred target port bit, determine if this
69 * bit should be set for port group.
70 */
71 if (tg_pt_gp->tg_pt_gp_pref)
72 buf[off] = 0x80;
73 /*
74 * Set the ASYMMETRIC ACCESS State
75 */
76 buf[off++] |= (atomic_read(
77 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
78 /*
79 * Set supported ASYMMETRIC ACCESS State bits
80 */
81 buf[off] = 0x80; /* T_SUP */
82 buf[off] |= 0x40; /* O_SUP */
83 buf[off] |= 0x8; /* U_SUP */
84 buf[off] |= 0x4; /* S_SUP */
85 buf[off] |= 0x2; /* AN_SUP */
86 buf[off++] |= 0x1; /* AO_SUP */
87 /*
88 * TARGET PORT GROUP
89 */
90 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
91 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
92
93 off++; /* Skip over Reserved */
94 /*
95 * STATUS CODE
96 */
97 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
98 /*
99 * Vendor Specific field
100 */
101 buf[off++] = 0x00;
102 /*
103 * TARGET PORT COUNT
104 */
105 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
106 rd_len += 8;
107
108 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
109 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
110 tg_pt_gp_mem_list) {
111 port = tg_pt_gp_mem->tg_pt;
112 /*
113 * Start Target Port descriptor format
114 *
115 * See spc4r17 section 6.2.7 Table 247
116 */
117 off += 2; /* Skip over Obsolete */
118 /*
119 * Set RELATIVE TARGET PORT IDENTIFIER
120 */
121 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
122 buf[off++] = (port->sep_rtpi & 0xff);
123 rd_len += 4;
124 }
125 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
126 }
127 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
128 /*
129 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
130 */
131 buf[0] = ((rd_len >> 24) & 0xff);
132 buf[1] = ((rd_len >> 16) & 0xff);
133 buf[2] = ((rd_len >> 8) & 0xff);
134 buf[3] = (rd_len & 0xff);
135
136 return 0;
137}
138
139/*
140 * SET_TARGET_PORT_GROUPS for explict ALUA operation.
141 *
142 * See spc4r17 section 6.35
143 */
144int core_emulate_set_target_port_groups(struct se_cmd *cmd)
145{
146 struct se_device *dev = SE_DEV(cmd);
147 struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
148 struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
149 struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
150 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
151 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
152 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
153 unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
154 u32 len = 4; /* Skip over RESERVED area in header */
155 int alua_access_state, primary = 0, rc;
156 u16 tg_pt_id, rtpi;
157
158 if (!(l_port))
159 return PYX_TRANSPORT_LU_COMM_FAILURE;
160 /*
161 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
162 * for the local tg_pt_gp.
163 */
164 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
165 if (!(l_tg_pt_gp_mem)) {
166 printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
167 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
168 }
169 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
170 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
171 if (!(l_tg_pt_gp)) {
172 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
173 printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
174 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
175 }
176 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
177 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
178
179 if (!(rc)) {
180 printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
181 " while TPGS_EXPLICT_ALUA is disabled\n");
182 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
183 }
184
185 while (len < cmd->data_length) {
186 alua_access_state = (ptr[0] & 0x0f);
187 /*
188 * Check the received ALUA access state, and determine if
189 * the state is a primary or secondary target port asymmetric
190 * access state.
191 */
192 rc = core_alua_check_transition(alua_access_state, &primary);
193 if (rc != 0) {
194 /*
195 * If the SET TARGET PORT GROUPS attempts to establish
196 * an invalid combination of target port asymmetric
197 * access states or attempts to establish an
198 * unsupported target port asymmetric access state,
199 * then the command shall be terminated with CHECK
200 * CONDITION status, with the sense key set to ILLEGAL
201 * REQUEST, and the additional sense code set to INVALID
202 * FIELD IN PARAMETER LIST.
203 */
204 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
205 }
206 rc = -1;
207 /*
208 * If the ASYMMETRIC ACCESS STATE field (see table 267)
209 * specifies a primary target port asymmetric access state,
210 * then the TARGET PORT GROUP OR TARGET PORT field specifies
211 * a primary target port group for which the primary target
212 * port asymmetric access state shall be changed. If the
213 * ASYMMETRIC ACCESS STATE field specifies a secondary target
214 * port asymmetric access state, then the TARGET PORT GROUP OR
215 * TARGET PORT field specifies the relative target port
216 * identifier (see 3.1.120) of the target port for which the
217 * secondary target port asymmetric access state shall be
218 * changed.
219 */
220 if (primary) {
221 tg_pt_id = ((ptr[2] << 8) & 0xff);
222 tg_pt_id |= (ptr[3] & 0xff);
223 /*
224 * Locate the matching target port group ID from
225 * the global tg_pt_gp list
226 */
227 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
228 list_for_each_entry(tg_pt_gp,
229 &T10_ALUA(su_dev)->tg_pt_gps_list,
230 tg_pt_gp_list) {
231 if (!(tg_pt_gp->tg_pt_gp_valid_id))
232 continue;
233
234 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
235 continue;
236
237 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
238 smp_mb__after_atomic_inc();
239 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
240
241 rc = core_alua_do_port_transition(tg_pt_gp,
242 dev, l_port, nacl,
243 alua_access_state, 1);
244
245 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
246 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
247 smp_mb__after_atomic_dec();
248 break;
249 }
250 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
251 /*
252 * If not matching target port group ID can be located
253 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
254 */
255 if (rc != 0)
256 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
257 } else {
258 /*
259 * Extact the RELATIVE TARGET PORT IDENTIFIER to identify
260 * the Target Port in question for the the incoming
261 * SET_TARGET_PORT_GROUPS op.
262 */
263 rtpi = ((ptr[2] << 8) & 0xff);
264 rtpi |= (ptr[3] & 0xff);
265 /*
266 * Locate the matching relative target port identifer
267 * for the struct se_device storage object.
268 */
269 spin_lock(&dev->se_port_lock);
270 list_for_each_entry(port, &dev->dev_sep_list,
271 sep_list) {
272 if (port->sep_rtpi != rtpi)
273 continue;
274
275 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
276 spin_unlock(&dev->se_port_lock);
277
278 rc = core_alua_set_tg_pt_secondary_state(
279 tg_pt_gp_mem, port, 1, 1);
280
281 spin_lock(&dev->se_port_lock);
282 break;
283 }
284 spin_unlock(&dev->se_port_lock);
285 /*
286 * If not matching relative target port identifier can
287 * be located, throw an exception with ASCQ:
288 * INVALID_PARAMETER_LIST
289 */
290 if (rc != 0)
291 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
292 }
293
294 ptr += 4;
295 len += 4;
296 }
297
298 return 0;
299}
300
301static inline int core_alua_state_nonoptimized(
302 struct se_cmd *cmd,
303 unsigned char *cdb,
304 int nonop_delay_msecs,
305 u8 *alua_ascq)
306{
307 /*
308 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
309 * later to determine if processing of this cmd needs to be
310 * temporarily delayed for the Active/NonOptimized primary access state.
311 */
312 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
313 cmd->alua_nonop_delay = nonop_delay_msecs;
314 return 0;
315}
316
317static inline int core_alua_state_standby(
318 struct se_cmd *cmd,
319 unsigned char *cdb,
320 u8 *alua_ascq)
321{
322 /*
323 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
324 * spc4r17 section 5.9.2.4.4
325 */
326 switch (cdb[0]) {
327 case INQUIRY:
328 case LOG_SELECT:
329 case LOG_SENSE:
330 case MODE_SELECT:
331 case MODE_SENSE:
332 case REPORT_LUNS:
333 case RECEIVE_DIAGNOSTIC:
334 case SEND_DIAGNOSTIC:
335 case MAINTENANCE_IN:
336 switch (cdb[1]) {
337 case MI_REPORT_TARGET_PGS:
338 return 0;
339 default:
340 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
341 return 1;
342 }
343 case MAINTENANCE_OUT:
344 switch (cdb[1]) {
345 case MO_SET_TARGET_PGS:
346 return 0;
347 default:
348 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
349 return 1;
350 }
351 case REQUEST_SENSE:
352 case PERSISTENT_RESERVE_IN:
353 case PERSISTENT_RESERVE_OUT:
354 case READ_BUFFER:
355 case WRITE_BUFFER:
356 return 0;
357 default:
358 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
359 return 1;
360 }
361
362 return 0;
363}
364
365static inline int core_alua_state_unavailable(
366 struct se_cmd *cmd,
367 unsigned char *cdb,
368 u8 *alua_ascq)
369{
370 /*
371 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
372 * spc4r17 section 5.9.2.4.5
373 */
374 switch (cdb[0]) {
375 case INQUIRY:
376 case REPORT_LUNS:
377 case MAINTENANCE_IN:
378 switch (cdb[1]) {
379 case MI_REPORT_TARGET_PGS:
380 return 0;
381 default:
382 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
383 return 1;
384 }
385 case MAINTENANCE_OUT:
386 switch (cdb[1]) {
387 case MO_SET_TARGET_PGS:
388 return 0;
389 default:
390 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
391 return 1;
392 }
393 case REQUEST_SENSE:
394 case READ_BUFFER:
395 case WRITE_BUFFER:
396 return 0;
397 default:
398 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
399 return 1;
400 }
401
402 return 0;
403}
404
405static inline int core_alua_state_transition(
406 struct se_cmd *cmd,
407 unsigned char *cdb,
408 u8 *alua_ascq)
409{
410 /*
411 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by
412 * spc4r17 section 5.9.2.5
413 */
414 switch (cdb[0]) {
415 case INQUIRY:
416 case REPORT_LUNS:
417 case MAINTENANCE_IN:
418 switch (cdb[1]) {
419 case MI_REPORT_TARGET_PGS:
420 return 0;
421 default:
422 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
423 return 1;
424 }
425 case REQUEST_SENSE:
426 case READ_BUFFER:
427 case WRITE_BUFFER:
428 return 0;
429 default:
430 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
431 return 1;
432 }
433
434 return 0;
435}
436
437/*
438 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
439 * in transport_cmd_sequencer(). This function is assigned to
440 * struct t10_alua *->state_check() in core_setup_alua()
441 */
442static int core_alua_state_check_nop(
443 struct se_cmd *cmd,
444 unsigned char *cdb,
445 u8 *alua_ascq)
446{
447 return 0;
448}
449
450/*
451 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
452 * This function is assigned to struct t10_alua *->state_check() in
453 * core_setup_alua()
454 *
455 * Also, this function can return three different return codes to
456 * signal transport_generic_cmd_sequencer()
457 *
458 * return 1: Is used to signal LUN not accecsable, and check condition/not ready
459 * return 0: Used to signal success
460 * reutrn -1: Used to signal failure, and invalid cdb field
461 */
462static int core_alua_state_check(
463 struct se_cmd *cmd,
464 unsigned char *cdb,
465 u8 *alua_ascq)
466{
467 struct se_lun *lun = SE_LUN(cmd);
468 struct se_port *port = lun->lun_sep;
469 struct t10_alua_tg_pt_gp *tg_pt_gp;
470 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
471 int out_alua_state, nonop_delay_msecs;
472
473 if (!(port))
474 return 0;
475 /*
476 * First, check for a struct se_port specific secondary ALUA target port
477 * access state: OFFLINE
478 */
479 if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
480 *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
481 printk(KERN_INFO "ALUA: Got secondary offline status for local"
482 " target port\n");
483 *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
484 return 1;
485 }
486 /*
487 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
488 * ALUA target port group, to obtain current ALUA access state.
489 * Otherwise look for the underlying struct se_device association with
490 * a ALUA logical unit group.
491 */
492 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
493 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
494 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
495 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
496 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
497 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
498 /*
499 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a seperate conditional
500 * statement so the complier knows explictly to check this case first.
501 * For the Optimized ALUA access state case, we want to process the
502 * incoming fabric cmd ASAP..
503 */
504 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
505 return 0;
506
507 switch (out_alua_state) {
508 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
509 return core_alua_state_nonoptimized(cmd, cdb,
510 nonop_delay_msecs, alua_ascq);
511 case ALUA_ACCESS_STATE_STANDBY:
512 return core_alua_state_standby(cmd, cdb, alua_ascq);
513 case ALUA_ACCESS_STATE_UNAVAILABLE:
514 return core_alua_state_unavailable(cmd, cdb, alua_ascq);
515 case ALUA_ACCESS_STATE_TRANSITION:
516 return core_alua_state_transition(cmd, cdb, alua_ascq);
517 /*
518 * OFFLINE is a secondary ALUA target port group access state, that is
519 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
520 */
521 case ALUA_ACCESS_STATE_OFFLINE:
522 default:
523 printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
524 out_alua_state);
525 return -1;
526 }
527
528 return 0;
529}
530
531/*
532 * Check implict and explict ALUA state change request.
533 */
534static int core_alua_check_transition(int state, int *primary)
535{
536 switch (state) {
537 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
538 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
539 case ALUA_ACCESS_STATE_STANDBY:
540 case ALUA_ACCESS_STATE_UNAVAILABLE:
541 /*
542 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
543 * defined as primary target port asymmetric access states.
544 */
545 *primary = 1;
546 break;
547 case ALUA_ACCESS_STATE_OFFLINE:
548 /*
549 * OFFLINE state is defined as a secondary target port
550 * asymmetric access state.
551 */
552 *primary = 0;
553 break;
554 default:
555 printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
556 return -1;
557 }
558
559 return 0;
560}
561
562static char *core_alua_dump_state(int state)
563{
564 switch (state) {
565 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
566 return "Active/Optimized";
567 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
568 return "Active/NonOptimized";
569 case ALUA_ACCESS_STATE_STANDBY:
570 return "Standby";
571 case ALUA_ACCESS_STATE_UNAVAILABLE:
572 return "Unavailable";
573 case ALUA_ACCESS_STATE_OFFLINE:
574 return "Offline";
575 default:
576 return "Unknown";
577 }
578
579 return NULL;
580}
581
582char *core_alua_dump_status(int status)
583{
584 switch (status) {
585 case ALUA_STATUS_NONE:
586 return "None";
587 case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
588 return "Altered by Explict STPG";
589 case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
590 return "Altered by Implict ALUA";
591 default:
592 return "Unknown";
593 }
594
595 return NULL;
596}
597
598/*
599 * Used by fabric modules to determine when we need to delay processing
600 * for the Active/NonOptimized paths..
601 */
602int core_alua_check_nonop_delay(
603 struct se_cmd *cmd)
604{
605 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
606 return 0;
607 if (in_interrupt())
608 return 0;
609 /*
610 * The ALUA Active/NonOptimized access state delay can be disabled
611 * in via configfs with a value of zero
612 */
613 if (!(cmd->alua_nonop_delay))
614 return 0;
615 /*
616 * struct se_cmd->alua_nonop_delay gets set by a target port group
617 * defined interval in core_alua_state_nonoptimized()
618 */
619 msleep_interruptible(cmd->alua_nonop_delay);
620 return 0;
621}
622EXPORT_SYMBOL(core_alua_check_nonop_delay);
623
/*
 * Write md_buf_len bytes of ALUA metadata from *md_buf to the file at
 * *path, creating or truncating it with mode 0600.
 *
 * Returns 0 on success, -ENODEV if the file cannot be opened, or -EIO
 * if the write fails.
 *
 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
 *
 */
static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	mm_segment_t old_fs;
	struct file *file;
	struct iovec iov[1];
	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;

	memset(iov, 0, sizeof(struct iovec));

	file = filp_open(path, flags, 0600);
	if (IS_ERR(file) || !file || !file->f_dentry) {
		printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
			path);
		return -ENODEV;
	}

	iov[0].iov_base = &md_buf[0];
	iov[0].iov_len = md_buf_len;

	/*
	 * vfs_writev() checks the buffer against the current address
	 * limit; lift it temporarily so the kernel buffer is accepted.
	 */
	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
	set_fs(old_fs);

	if (ret < 0) {
		printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
		filp_close(file, NULL);
		return -EIO;
	}
	filp_close(file, NULL);

	return 0;
}
664
665/*
666 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
667 */
668static int core_alua_update_tpg_primary_metadata(
669 struct t10_alua_tg_pt_gp *tg_pt_gp,
670 int primary_state,
671 unsigned char *md_buf)
672{
673 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
674 struct t10_wwn *wwn = &su_dev->t10_wwn;
675 char path[ALUA_METADATA_PATH_LEN];
676 int len;
677
678 memset(path, 0, ALUA_METADATA_PATH_LEN);
679
680 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
681 "tg_pt_gp_id=%hu\n"
682 "alua_access_state=0x%02x\n"
683 "alua_access_status=0x%02x\n",
684 tg_pt_gp->tg_pt_gp_id, primary_state,
685 tg_pt_gp->tg_pt_gp_alua_access_status);
686
687 snprintf(path, ALUA_METADATA_PATH_LEN,
688 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
689 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
690
691 return core_alua_write_tpg_metadata(path, md_buf, len);
692}
693
/*
 * Perform a primary ALUA access state transition on a single target
 * port group: enter the TRANSITION state, establish Unit Attentions
 * for the affected I_T nexuses, optionally persist the new state as
 * metadata, then set the requested new_state.
 *
 * l_port/nacl identify the initiating I_T nexus (may be NULL); md_buf
 * is the metadata buffer allocated by core_alua_do_port_transition().
 * Always returns 0.
 */
static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	struct se_port *l_port,
	struct se_node_acl *nacl,
	unsigned char *md_buf,
	int new_state,
	int explict)
{
	struct se_dev_entry *se_deve;
	struct se_lun_acl *lacl;
	struct se_port *port;
	struct t10_alua_tg_pt_gp_member *mem;
	int old_state = 0;
	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
			ALUA_ACCESS_STATE_TRANSITION);
	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
		port = mem->tg_pt;
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command
		 */
		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		/*
		 * Drop tg_pt_gp_lock while walking this port's mapped LUN
		 * list; the ref_cnt taken above keeps *mem valid, and the
		 * lock is reacquired before continuing the outer walk.
		 */
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_for_each_entry(se_deve, &port->sep_alua_list,
					alua_port_list) {
			lacl = se_deve->se_lun_acl;
			/*
			 * se_deve->se_lun_acl pointer may be NULL for a
			 * entry created without explict Node+MappedLUN ACLs
			 */
			if (!(lacl))
				continue;
			/*
			 * Skip the initiating I_T nexus itself for an
			 * explict STPG, per the SPC-4 rule quoted above.
			 */
			if (explict &&
			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
			   (l_port != NULL) && (l_port == port))
				continue;

			core_scsi3_ua_allocate(lacl->se_lun_nacl,
				se_deve->mapped_lun, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock_bh(&port->sep_alua_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
		core_alua_update_tpg_primary_metadata(tg_pt_gp,
				new_state, md_buf);
		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
	}
	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);

	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explict) ? "explict" :
		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
		core_alua_dump_state(new_state));

	return 0;
}
803
804int core_alua_do_port_transition(
805 struct t10_alua_tg_pt_gp *l_tg_pt_gp,
806 struct se_device *l_dev,
807 struct se_port *l_port,
808 struct se_node_acl *l_nacl,
809 int new_state,
810 int explict)
811{
812 struct se_device *dev;
813 struct se_port *port;
814 struct se_subsystem_dev *su_dev;
815 struct se_node_acl *nacl;
816 struct t10_alua_lu_gp *lu_gp;
817 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
818 struct t10_alua_tg_pt_gp *tg_pt_gp;
819 unsigned char *md_buf;
820 int primary;
821
822 if (core_alua_check_transition(new_state, &primary) != 0)
823 return -EINVAL;
824
825 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
826 if (!(md_buf)) {
827 printk("Unable to allocate buf for ALUA metadata\n");
828 return -ENOMEM;
829 }
830
831 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
832 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
833 lu_gp = local_lu_gp_mem->lu_gp;
834 atomic_inc(&lu_gp->lu_gp_ref_cnt);
835 smp_mb__after_atomic_inc();
836 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
837 /*
838 * For storage objects that are members of the 'default_lu_gp',
839 * we only do transition on the passed *l_tp_pt_gp, and not
840 * on all of the matching target port groups IDs in default_lu_gp.
841 */
842 if (!(lu_gp->lu_gp_id)) {
843 /*
844 * core_alua_do_transition_tg_pt() will always return
845 * success.
846 */
847 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
848 md_buf, new_state, explict);
849 atomic_dec(&lu_gp->lu_gp_ref_cnt);
850 smp_mb__after_atomic_dec();
851 kfree(md_buf);
852 return 0;
853 }
854 /*
855 * For all other LU groups aside from 'default_lu_gp', walk all of
856 * the associated storage objects looking for a matching target port
857 * group ID from the local target port group.
858 */
859 spin_lock(&lu_gp->lu_gp_lock);
860 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
861 lu_gp_mem_list) {
862
863 dev = lu_gp_mem->lu_gp_mem_dev;
864 su_dev = dev->se_sub_dev;
865 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
866 smp_mb__after_atomic_inc();
867 spin_unlock(&lu_gp->lu_gp_lock);
868
869 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
870 list_for_each_entry(tg_pt_gp,
871 &T10_ALUA(su_dev)->tg_pt_gps_list,
872 tg_pt_gp_list) {
873
874 if (!(tg_pt_gp->tg_pt_gp_valid_id))
875 continue;
876 /*
877 * If the target behavior port asymmetric access state
878 * is changed for any target port group accessiable via
879 * a logical unit within a LU group, the target port
880 * behavior group asymmetric access states for the same
881 * target port group accessible via other logical units
882 * in that LU group will also change.
883 */
884 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
885 continue;
886
887 if (l_tg_pt_gp == tg_pt_gp) {
888 port = l_port;
889 nacl = l_nacl;
890 } else {
891 port = NULL;
892 nacl = NULL;
893 }
894 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
895 smp_mb__after_atomic_inc();
896 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
897 /*
898 * core_alua_do_transition_tg_pt() will always return
899 * success.
900 */
901 core_alua_do_transition_tg_pt(tg_pt_gp, port,
902 nacl, md_buf, new_state, explict);
903
904 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
905 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
906 smp_mb__after_atomic_dec();
907 }
908 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
909
910 spin_lock(&lu_gp->lu_gp_lock);
911 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
912 smp_mb__after_atomic_dec();
913 }
914 spin_unlock(&lu_gp->lu_gp_lock);
915
916 printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
917 " Group IDs: %hu %s transition to primary state: %s\n",
918 config_item_name(&lu_gp->lu_gp_group.cg_item),
919 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
920 core_alua_dump_state(new_state));
921
922 atomic_dec(&lu_gp->lu_gp_ref_cnt);
923 smp_mb__after_atomic_dec();
924 kfree(md_buf);
925 return 0;
926}
927
928/*
929 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
930 */
931static int core_alua_update_tpg_secondary_metadata(
932 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
933 struct se_port *port,
934 unsigned char *md_buf,
935 u32 md_buf_len)
936{
937 struct se_portal_group *se_tpg = port->sep_tpg;
938 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
939 int len;
940
941 memset(path, 0, ALUA_METADATA_PATH_LEN);
942 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
943
944 len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
945 TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
946
947 if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
948 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
949 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
950
951 len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
952 "alua_tg_pt_status=0x%02x\n",
953 atomic_read(&port->sep_tg_pt_secondary_offline),
954 port->sep_tg_pt_secondary_stat);
955
956 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
957 TPG_TFO(se_tpg)->get_fabric_name(), wwn,
958 port->sep_lun->unpacked_lun);
959
960 return core_alua_write_tpg_metadata(path, md_buf, len);
961}
962
/*
 * Set (offline != 0) or clear the secondary OFFLINE ALUA access state
 * for *port, record whether the change was explict or implict, apply
 * the optional transition delay, and optionally persist the new state
 * as fabric metadata.
 *
 * Returns 0 on success, or -1 if the port has no target port group
 * association or the metadata buffer cannot be allocated.
 */
static int core_alua_set_tg_pt_secondary_state(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	int explict,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *md_buf;
	u32 md_buf_len;
	int trans_delay_msecs;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (!(tg_pt_gp)) {
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		printk(KERN_ERR "Unable to complete secondary state"
				" transition\n");
		return -1;
	}
	/* Snapshot the delay while the group association is stable */
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously secondary state for struct se_port
	 */
	if (offline)
		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
	else
		atomic_set(&port->sep_tg_pt_secondary_offline, 0);

	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
	port->sep_tg_pt_secondary_stat = (explict) ?
			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;

	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explict) ? "explict" :
		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (port->sep_tg_pt_secondary_write_md) {
		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
		if (!(md_buf)) {
			printk(KERN_ERR "Unable to allocate md_buf for"
				" secondary ALUA access metadata\n");
			return -1;
		}
		mutex_lock(&port->sep_tg_pt_md_mutex);
		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
				md_buf, md_buf_len);
		mutex_unlock(&port->sep_tg_pt_md_mutex);

		kfree(md_buf);
	}

	return 0;
}
1030
1031struct t10_alua_lu_gp *
1032core_alua_allocate_lu_gp(const char *name, int def_group)
1033{
1034 struct t10_alua_lu_gp *lu_gp;
1035
1036 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1037 if (!(lu_gp)) {
1038 printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
1039 return ERR_PTR(-ENOMEM);;
1040 }
1041 INIT_LIST_HEAD(&lu_gp->lu_gp_list);
1042 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1043 spin_lock_init(&lu_gp->lu_gp_lock);
1044 atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1045
1046 if (def_group) {
1047 lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;;
1048 lu_gp->lu_gp_valid_id = 1;
1049 se_global->alua_lu_gps_count++;
1050 }
1051
1052 return lu_gp;
1053}
1054
/*
 * Assign a LU group ID to *lu_gp and add it to the global list.
 * A non-zero lu_gp_id requests that exact ID; zero auto-assigns the
 * next free value from se_global->alua_lu_gps_counter.
 *
 * Returns 0 on success, or -1 if the group already has a valid ID,
 * the requested ID is already in use, or the 0xffff group limit is
 * reached (in which case *lu_gp is freed here).
 */
int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once..
	 */
	if (lu_gp->lu_gp_valid_id) {
		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -1;
	}

	spin_lock(&se_global->lu_gps_lock);
	if (se_global->alua_lu_gps_count == 0x0000ffff) {
		printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&se_global->lu_gps_lock);
		/*
		 * NOTE(review): freeing the caller's lu_gp on this error
		 * path means callers must NOT free it again after a -1
		 * return — confirm all callers honor this ownership rule.
		 */
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -1;
	}
again:
	/* Auto-assign from the counter when no explicit ID was requested */
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				se_global->alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			/* Auto-assigned ID collided: try the next counter value */
			if (!(lu_gp_id))
				goto again;

			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&se_global->lu_gps_lock);
			return -1;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
	se_global->alua_lu_gps_count++;
	spin_unlock(&se_global->lu_gps_lock);

	return 0;
}
1101
1102static struct t10_alua_lu_gp_member *
1103core_alua_allocate_lu_gp_mem(struct se_device *dev)
1104{
1105 struct t10_alua_lu_gp_member *lu_gp_mem;
1106
1107 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1108 if (!(lu_gp_mem)) {
1109 printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
1110 return ERR_PTR(-ENOMEM);
1111 }
1112 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1113 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1114 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1115
1116 lu_gp_mem->lu_gp_mem_dev = dev;
1117 dev->dev_alua_lu_gp_mem = lu_gp_mem;
1118
1119 return lu_gp_mem;
1120}
1121
/*
 * Tear down a LU group: remove it from the global list, wait for
 * outstanding name-lookup references to drain, detach every member
 * (re-associating members with the default_lu_gp when appropriate),
 * and free the group.
 */
void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here, we remove the *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&se_global->lu_gps_lock);
	atomic_set(&lu_gp->lu_gp_shutdown, 1);
	list_del(&lu_gp->lu_gp_list);
	se_global->alua_lu_gps_count--;
	spin_unlock(&se_global->lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
			&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		/*
		 * lu_gp_lock must be dropped before taking the per-member
		 * lock below; list_for_each_entry_safe() keeps the walk
		 * valid across the unlock/relock.
		 */
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 *
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != se_global->default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					se_global->default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}
1181
/*
 * Release *dev's LU group membership: wait for in-flight references to
 * drain, unhook the member from its LU group (if any), and free the
 * member.  No-op unless the device runs SPC3_ALUA_EMULATED.
 */
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = T10_ALUA(su_dev);
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!(lu_gp_mem))
		return;

	/* Spin until transient users of this member have dropped off */
	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if ((lu_gp)) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}
1215
1216struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1217{
1218 struct t10_alua_lu_gp *lu_gp;
1219 struct config_item *ci;
1220
1221 spin_lock(&se_global->lu_gps_lock);
1222 list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
1223 if (!(lu_gp->lu_gp_valid_id))
1224 continue;
1225 ci = &lu_gp->lu_gp_group.cg_item;
1226 if (!(strcmp(config_item_name(ci), name))) {
1227 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1228 spin_unlock(&se_global->lu_gps_lock);
1229 return lu_gp;
1230 }
1231 }
1232 spin_unlock(&se_global->lu_gps_lock);
1233
1234 return NULL;
1235}
1236
/*
 * Drop the lu_gp_ref_cnt reference taken by
 * core_alua_get_lu_gp_by_name(), under the global LU groups lock.
 */
void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&se_global->lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&se_global->lu_gps_lock);
}
1243
1244/*
1245 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1246 */
1247void __core_alua_attach_lu_gp_mem(
1248 struct t10_alua_lu_gp_member *lu_gp_mem,
1249 struct t10_alua_lu_gp *lu_gp)
1250{
1251 spin_lock(&lu_gp->lu_gp_lock);
1252 lu_gp_mem->lu_gp = lu_gp;
1253 lu_gp_mem->lu_gp_assoc = 1;
1254 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1255 lu_gp->lu_gp_members++;
1256 spin_unlock(&lu_gp->lu_gp_lock);
1257}
1258
1259/*
1260 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1261 */
1262void __core_alua_drop_lu_gp_mem(
1263 struct t10_alua_lu_gp_member *lu_gp_mem,
1264 struct t10_alua_lu_gp *lu_gp)
1265{
1266 spin_lock(&lu_gp->lu_gp_lock);
1267 list_del(&lu_gp_mem->lu_gp_mem_list);
1268 lu_gp_mem->lu_gp = NULL;
1269 lu_gp_mem->lu_gp_assoc = 0;
1270 lu_gp->lu_gp_members--;
1271 spin_unlock(&lu_gp->lu_gp_lock);
1272}
1273
1274struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
1275 struct se_subsystem_dev *su_dev,
1276 const char *name,
1277 int def_group)
1278{
1279 struct t10_alua_tg_pt_gp *tg_pt_gp;
1280
1281 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1282 if (!(tg_pt_gp)) {
1283 printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
1284 return NULL;
1285 }
1286 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1287 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1288 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1289 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1290 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1291 tg_pt_gp->tg_pt_gp_su_dev = su_dev;
1292 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1293 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1294 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
1295 /*
1296 * Enable both explict and implict ALUA support by default
1297 */
1298 tg_pt_gp->tg_pt_gp_alua_access_type =
1299 TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
1300 /*
1301 * Set the default Active/NonOptimized Delay in milliseconds
1302 */
1303 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1304 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1305
1306 if (def_group) {
1307 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1308 tg_pt_gp->tg_pt_gp_id =
1309 T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
1310 tg_pt_gp->tg_pt_gp_valid_id = 1;
1311 T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
1312 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1313 &T10_ALUA(su_dev)->tg_pt_gps_list);
1314 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1315 }
1316
1317 return tg_pt_gp;
1318}
1319
/*
 * Assign a target port group ID to *tg_pt_gp and add it to the su_dev
 * list.  A non-zero tg_pt_gp_id requests that exact ID; zero
 * auto-assigns the next free value from alua_tg_pt_gps_counter.
 *
 * Returns 0 on success, or -1 if the group already has a valid ID,
 * the requested ID is already in use, or the 0xffff group limit is
 * reached (in which case *tg_pt_gp is freed here).
 */
int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;
	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -1;
	}

	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
	if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
		printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
		/*
		 * NOTE(review): freeing the caller's tg_pt_gp on this error
		 * path means callers must NOT free it again after a -1
		 * return — confirm all callers honor this ownership rule.
		 */
		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
		return -1;
	}
again:
	/* Auto-assign from the counter when no explicit ID was requested */
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			/* Auto-assigned ID collided: try the next counter value */
			if (!(tg_pt_gp_id))
				goto again;

			printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
			return -1;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&T10_ALUA(su_dev)->tg_pt_gps_list);
	T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);

	return 0;
}
1370
1371struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1372 struct se_port *port)
1373{
1374 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1375
1376 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1377 GFP_KERNEL);
1378 if (!(tg_pt_gp_mem)) {
1379 printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1380 return ERR_PTR(-ENOMEM);
1381 }
1382 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1383 spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1384 atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1385
1386 tg_pt_gp_mem->tg_pt = port;
1387 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1388 atomic_set(&port->sep_tg_pt_gp_active, 1);
1389
1390 return tg_pt_gp_mem;
1391}
1392
1393void core_alua_free_tg_pt_gp(
1394 struct t10_alua_tg_pt_gp *tg_pt_gp)
1395{
1396 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1397 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1398 /*
1399 * Once we have reached this point, config_item_put() has already
1400 * been called from target_core_alua_drop_tg_pt_gp().
1401 *
1402 * Here we remove *tg_pt_gp from the global list so that
1403 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
1404 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1405 */
1406 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1407 list_del(&tg_pt_gp->tg_pt_gp_list);
1408 T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
1409 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1410 /*
1411 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1412 * core_alua_get_tg_pt_gp_by_name() in
1413 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1414 * to be released with core_alua_put_tg_pt_gp_from_name().
1415 */
1416 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1417 cpu_relax();
1418 /*
1419 * Release reference to struct t10_alua_tg_pt_gp from all associated
1420 * struct se_port.
1421 */
1422 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1423 list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1424 &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1425 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1426 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1427 tg_pt_gp->tg_pt_gp_members--;
1428 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1429 }
1430 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1431 /*
1432 * tg_pt_gp_mem is assoicated with a single
1433 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1434 * core_alua_free_tg_pt_gp_mem().
1435 *
1436 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1437 * assume we want to re-assocate a given tg_pt_gp_mem with
1438 * default_tg_pt_gp.
1439 */
1440 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1441 if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
1442 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1443 T10_ALUA(su_dev)->default_tg_pt_gp);
1444 } else
1445 tg_pt_gp_mem->tg_pt_gp = NULL;
1446 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1447
1448 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1449 }
1450 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1451
1452 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1453}
1454
/*
 * Release *port's target port group membership: wait for in-flight
 * references to drain, unhook the member from its group (if any), and
 * free the member.  No-op unless the device runs SPC3_ALUA_EMULATED.
 */
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct t10_alua *alua = T10_ALUA(su_dev);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!(tg_pt_gp_mem))
		return;

	/* Spin until transient users of this member have dropped off */
	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if ((tg_pt_gp)) {
		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		tg_pt_gp_mem->tg_pt_gp = NULL;
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
}
1488
1489static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1490 struct se_subsystem_dev *su_dev,
1491 const char *name)
1492{
1493 struct t10_alua_tg_pt_gp *tg_pt_gp;
1494 struct config_item *ci;
1495
1496 spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1497 list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
1498 tg_pt_gp_list) {
1499 if (!(tg_pt_gp->tg_pt_gp_valid_id))
1500 continue;
1501 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1502 if (!(strcmp(config_item_name(ci), name))) {
1503 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1504 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1505 return tg_pt_gp;
1506 }
1507 }
1508 spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
1509
1510 return NULL;
1511}
1512
/*
 * Drop the tg_pt_gp_ref_cnt reference taken by
 * core_alua_get_tg_pt_gp_by_name(), under the su_dev group list lock.
 */
static void core_alua_put_tg_pt_gp_from_name(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;

	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
}
1522
1523/*
1524 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1525 */
1526void __core_alua_attach_tg_pt_gp_mem(
1527 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1528 struct t10_alua_tg_pt_gp *tg_pt_gp)
1529{
1530 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1531 tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1532 tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1533 list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1534 &tg_pt_gp->tg_pt_gp_mem_list);
1535 tg_pt_gp->tg_pt_gp_members++;
1536 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1537}
1538
1539/*
1540 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1541 */
1542static void __core_alua_drop_tg_pt_gp_mem(
1543 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1544 struct t10_alua_tg_pt_gp *tg_pt_gp)
1545{
1546 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1547 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1548 tg_pt_gp_mem->tg_pt_gp = NULL;
1549 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1550 tg_pt_gp->tg_pt_gp_members--;
1551 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1552}
1553
/*
 * ConfigFS show handler: format the port's ALUA target port group state
 * (alias, ID, primary/secondary access state and status) into @page.
 *
 * Returns the number of bytes written, or 0 when the device does not
 * use SPC-3 ALUA emulation or the port has no group member.
 */
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
	struct config_item *tg_pt_ci;
	struct t10_alua *alua = T10_ALUA(su_dev);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	ssize_t len = 0;

	if (alua->alua_type != SPC3_ALUA_EMULATED)
		return len;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!(tg_pt_gp_mem))
		return len;

	/* Hold the member lock so the group cannot be detached while its
	 * state is being formatted */
	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if ((tg_pt_gp)) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(atomic_read(
				&tg_pt_gp->tg_pt_gp_alua_access_state)),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
			"Offline" : "None",
			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	return len;
}
1591
1592ssize_t core_alua_store_tg_pt_gp_info(
1593 struct se_port *port,
1594 const char *page,
1595 size_t count)
1596{
1597 struct se_portal_group *tpg;
1598 struct se_lun *lun;
1599 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1600 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1601 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1602 unsigned char buf[TG_PT_GROUP_NAME_BUF];
1603 int move = 0;
1604
1605 tpg = port->sep_tpg;
1606 lun = port->sep_lun;
1607
1608 if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
1609 printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
1610 " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
1611 TPG_TFO(tpg)->tpg_get_tag(tpg),
1612 config_item_name(&lun->lun_group.cg_item));
1613 return -EINVAL;
1614 }
1615
1616 if (count > TG_PT_GROUP_NAME_BUF) {
1617 printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
1618 return -EINVAL;
1619 }
1620 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1621 memcpy(buf, page, count);
1622 /*
1623 * Any ALUA target port group alias besides "NULL" means we will be
1624 * making a new group association.
1625 */
1626 if (strcmp(strstrip(buf), "NULL")) {
1627 /*
1628 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1629 * struct t10_alua_tg_pt_gp. This reference is released with
1630 * core_alua_put_tg_pt_gp_from_name() below.
1631 */
1632 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
1633 strstrip(buf));
1634 if (!(tg_pt_gp_new))
1635 return -ENODEV;
1636 }
1637 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1638 if (!(tg_pt_gp_mem)) {
1639 if (tg_pt_gp_new)
1640 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1641 printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
1642 return -EINVAL;
1643 }
1644
1645 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1646 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1647 if ((tg_pt_gp)) {
1648 /*
1649 * Clearing an existing tg_pt_gp association, and replacing
1650 * with the default_tg_pt_gp.
1651 */
1652 if (!(tg_pt_gp_new)) {
1653 printk(KERN_INFO "Target_Core_ConfigFS: Moving"
1654 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
1655 " alua/%s, ID: %hu back to"
1656 " default_tg_pt_gp\n",
1657 TPG_TFO(tpg)->tpg_get_wwn(tpg),
1658 TPG_TFO(tpg)->tpg_get_tag(tpg),
1659 config_item_name(&lun->lun_group.cg_item),
1660 config_item_name(
1661 &tg_pt_gp->tg_pt_gp_group.cg_item),
1662 tg_pt_gp->tg_pt_gp_id);
1663
1664 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1665 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1666 T10_ALUA(su_dev)->default_tg_pt_gp);
1667 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1668
1669 return count;
1670 }
1671 /*
1672 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
1673 */
1674 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1675 move = 1;
1676 }
1677 /*
1678 * Associate tg_pt_gp_mem with tg_pt_gp_new.
1679 */
1680 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
1681 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1682 printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1683 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
1684 "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
1685 TPG_TFO(tpg)->tpg_get_tag(tpg),
1686 config_item_name(&lun->lun_group.cg_item),
1687 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1688 tg_pt_gp_new->tg_pt_gp_id);
1689
1690 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1691 return count;
1692}
1693
1694ssize_t core_alua_show_access_type(
1695 struct t10_alua_tg_pt_gp *tg_pt_gp,
1696 char *page)
1697{
1698 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
1699 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
1700 return sprintf(page, "Implict and Explict\n");
1701 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
1702 return sprintf(page, "Implict\n");
1703 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
1704 return sprintf(page, "Explict\n");
1705 else
1706 return sprintf(page, "None\n");
1707}
1708
1709ssize_t core_alua_store_access_type(
1710 struct t10_alua_tg_pt_gp *tg_pt_gp,
1711 const char *page,
1712 size_t count)
1713{
1714 unsigned long tmp;
1715 int ret;
1716
1717 ret = strict_strtoul(page, 0, &tmp);
1718 if (ret < 0) {
1719 printk(KERN_ERR "Unable to extract alua_access_type\n");
1720 return -EINVAL;
1721 }
1722 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1723 printk(KERN_ERR "Illegal value for alua_access_type:"
1724 " %lu\n", tmp);
1725 return -EINVAL;
1726 }
1727 if (tmp == 3)
1728 tg_pt_gp->tg_pt_gp_alua_access_type =
1729 TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
1730 else if (tmp == 2)
1731 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
1732 else if (tmp == 1)
1733 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
1734 else
1735 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1736
1737 return count;
1738}
1739
/*
 * ConfigFS show handler: report the group's Active/NonOptimized delay
 * in milliseconds.
 */
ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}
1746
1747ssize_t core_alua_store_nonop_delay_msecs(
1748 struct t10_alua_tg_pt_gp *tg_pt_gp,
1749 const char *page,
1750 size_t count)
1751{
1752 unsigned long tmp;
1753 int ret;
1754
1755 ret = strict_strtoul(page, 0, &tmp);
1756 if (ret < 0) {
1757 printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
1758 return -EINVAL;
1759 }
1760 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
1761 printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
1762 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
1763 ALUA_MAX_NONOP_DELAY_MSECS);
1764 return -EINVAL;
1765 }
1766 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
1767
1768 return count;
1769}
1770
/*
 * ConfigFS show handler: report the group's transition delay in
 * milliseconds.
 */
ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}
1777
1778ssize_t core_alua_store_trans_delay_msecs(
1779 struct t10_alua_tg_pt_gp *tg_pt_gp,
1780 const char *page,
1781 size_t count)
1782{
1783 unsigned long tmp;
1784 int ret;
1785
1786 ret = strict_strtoul(page, 0, &tmp);
1787 if (ret < 0) {
1788 printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
1789 return -EINVAL;
1790 }
1791 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
1792 printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
1793 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
1794 ALUA_MAX_TRANS_DELAY_MSECS);
1795 return -EINVAL;
1796 }
1797 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
1798
1799 return count;
1800}
1801
/*
 * ConfigFS show handler: report the group's PREF (preferred target
 * port) bit.
 */
ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}
1808
1809ssize_t core_alua_store_preferred_bit(
1810 struct t10_alua_tg_pt_gp *tg_pt_gp,
1811 const char *page,
1812 size_t count)
1813{
1814 unsigned long tmp;
1815 int ret;
1816
1817 ret = strict_strtoul(page, 0, &tmp);
1818 if (ret < 0) {
1819 printk(KERN_ERR "Unable to extract preferred ALUA value\n");
1820 return -EINVAL;
1821 }
1822 if ((tmp != 0) && (tmp != 1)) {
1823 printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
1824 return -EINVAL;
1825 }
1826 tg_pt_gp->tg_pt_gp_pref = (int)tmp;
1827
1828 return count;
1829}
1830
1831ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
1832{
1833 if (!(lun->lun_sep))
1834 return -ENODEV;
1835
1836 return sprintf(page, "%d\n",
1837 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
1838}
1839
1840ssize_t core_alua_store_offline_bit(
1841 struct se_lun *lun,
1842 const char *page,
1843 size_t count)
1844{
1845 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1846 unsigned long tmp;
1847 int ret;
1848
1849 if (!(lun->lun_sep))
1850 return -ENODEV;
1851
1852 ret = strict_strtoul(page, 0, &tmp);
1853 if (ret < 0) {
1854 printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
1855 return -EINVAL;
1856 }
1857 if ((tmp != 0) && (tmp != 1)) {
1858 printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
1859 tmp);
1860 return -EINVAL;
1861 }
1862 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
1863 if (!(tg_pt_gp_mem)) {
1864 printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
1865 return -EINVAL;
1866 }
1867
1868 ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
1869 lun->lun_sep, 0, (int)tmp);
1870 if (ret < 0)
1871 return -EINVAL;
1872
1873 return count;
1874}
1875
1876ssize_t core_alua_show_secondary_status(
1877 struct se_lun *lun,
1878 char *page)
1879{
1880 return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
1881}
1882
1883ssize_t core_alua_store_secondary_status(
1884 struct se_lun *lun,
1885 const char *page,
1886 size_t count)
1887{
1888 unsigned long tmp;
1889 int ret;
1890
1891 ret = strict_strtoul(page, 0, &tmp);
1892 if (ret < 0) {
1893 printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
1894 return -EINVAL;
1895 }
1896 if ((tmp != ALUA_STATUS_NONE) &&
1897 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
1898 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
1899 printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
1900 tmp);
1901 return -EINVAL;
1902 }
1903 lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
1904
1905 return count;
1906}
1907
1908ssize_t core_alua_show_secondary_write_metadata(
1909 struct se_lun *lun,
1910 char *page)
1911{
1912 return sprintf(page, "%d\n",
1913 lun->lun_sep->sep_tg_pt_secondary_write_md);
1914}
1915
1916ssize_t core_alua_store_secondary_write_metadata(
1917 struct se_lun *lun,
1918 const char *page,
1919 size_t count)
1920{
1921 unsigned long tmp;
1922 int ret;
1923
1924 ret = strict_strtoul(page, 0, &tmp);
1925 if (ret < 0) {
1926 printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
1927 return -EINVAL;
1928 }
1929 if ((tmp != 0) && (tmp != 1)) {
1930 printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
1931 " %lu\n", tmp);
1932 return -EINVAL;
1933 }
1934 lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
1935
1936 return count;
1937}
1938
/*
 * Select the ALUA operating mode for a newly configured struct se_device
 * and install the matching alua_state_check handler.
 *
 * @force_pt forces SPC_ALUA_PASSTHROUGH regardless of transport type.
 * Returns 0 on success, or -1 when the default LU group member cannot
 * be allocated.
 */
int core_setup_alua(struct se_device *dev, int force_pt)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua *alua = T10_ALUA(su_dev);
	struct t10_alua_lu_gp_member *lu_gp_mem;
	/*
	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
	 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
	 * cause a problem because libata and some SATA RAID HBAs appear
	 * under Linux/SCSI, but emulate SCSI logic themselves.
	 */
	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
	    !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
		alua->alua_type = SPC_ALUA_PASSTHROUGH;
		alua->alua_state_check = &core_alua_state_check_nop;
		printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
			" emulation\n", TRANSPORT(dev)->name);
		return 0;
	}
	/*
	 * If SPC-3 or above is reported by real or emulated struct se_device,
	 * use emulated ALUA.
	 */
	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
		printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
			" device\n", TRANSPORT(dev)->name);
		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
			return -1;

		alua->alua_type = SPC3_ALUA_EMULATED;
		alua->alua_state_check = &core_alua_state_check;
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				se_global->default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			TRANSPORT(dev)->name);
	} else {
		alua->alua_type = SPC2_ALUA_DISABLED;
		alua->alua_state_check = &core_alua_state_check_nop;
		printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
			" device\n", TRANSPORT(dev)->name);
	}

	return 0;
}
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
new file mode 100644
index 000000000000..c86f97a081ed
--- /dev/null
+++ b/drivers/target/target_core_alua.h
@@ -0,0 +1,126 @@
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H

/*
 * INQUIRY response data, TPGS Field
 *
 * from spc4r17 section 6.4.2 Table 135
 */
#define TPGS_NO_ALUA 0x00
#define TPGS_IMPLICT_ALUA 0x10
#define TPGS_EXPLICT_ALUA 0x20

/*
 * ASYMMETRIC ACCESS STATE field
 *
 * from spc4r17 section 6.27 Table 245
 */
#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf

/*
 * REPORT_TARGET_PORT_GROUP STATUS CODE
 *
 * from spc4r17 section 6.27 Table 246
 */
#define ALUA_STATUS_NONE 0x00
#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02

/*
 * From spc4r17, Table D.1: ASC and ASCQ Assignment
 */
#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a
#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b
#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c
#define ASCQ_04H_ALUA_OFFLINE 0x12

/*
 * Used as the default for Active/NonOptimized delay (in milliseconds)
 * This can also be changed via configfs on a per target port group basis.
 */
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
/*
 * Used for implicit and explicit ALUA transitional delay, that is disabled
 * by default, and is intended to be used for debugging client side ALUA code.
 */
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
 * Used by core_alua_update_tpg_primary_metadata() and
 * core_alua_update_tpg_secondary_metadata()
 */
#define ALUA_METADATA_PATH_LEN 512
/*
 * Used by core_alua_update_tpg_secondary_metadata()
 */
#define ALUA_SECONDARY_METADATA_WWN_LEN 256

/* Caches for ALUA LU/target-port group objects and their members */
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

extern int core_emulate_report_target_port_groups(struct se_cmd *);
extern int core_emulate_set_target_port_groups(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
				struct se_device *, struct se_port *,
				struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
extern void core_alua_free_lu_gp_mem(struct se_device *);
extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
					struct t10_alua_lu_gp *);
extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
					struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
			struct se_subsystem_dev *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
					struct se_port *);
extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
					struct t10_alua_tg_pt_gp *);
extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
						size_t);
extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
					const char *, size_t);
extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
						char *);
extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
					const char *, size_t);
extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
					char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
					const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
					char *);
extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
					const char *, size_t);
extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
					size_t);
extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
extern ssize_t core_alua_store_secondary_status(struct se_lun *,
					const char *, size_t);
extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
					char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
					const char *, size_t);
extern int core_setup_alua(struct se_device *, int);

#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
new file mode 100644
index 000000000000..366080baf474
--- /dev/null
+++ b/drivers/target/target_core_cdb.c
@@ -0,0 +1,1131 @@
1/*
2 * CDB emulation for non-READ/WRITE commands.
3 *
4 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
5 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
6 * Copyright (c) 2007-2010 Rising Tide Systems
7 * Copyright (c) 2008-2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#include <asm/unaligned.h>
27#include <scsi/scsi.h>
28
29#include <target/target_core_base.h>
30#include <target/target_core_transport.h>
31#include <target/target_core_fabric_ops.h>
32#include "target_core_ua.h"
33
34static void
35target_fill_alua_data(struct se_port *port, unsigned char *buf)
36{
37 struct t10_alua_tg_pt_gp *tg_pt_gp;
38 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
39
40 /*
41 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
42 */
43 buf[5] = 0x80;
44
45 /*
46 * Set TPGS field for explict and/or implict ALUA access type
47 * and opteration.
48 *
49 * See spc4r17 section 6.4.2 Table 135
50 */
51 if (!port)
52 return;
53 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
54 if (!tg_pt_gp_mem)
55 return;
56
57 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
58 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
59 if (tg_pt_gp)
60 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
61 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
62}
63
/*
 * Build the standard (EVPD=0) INQUIRY response payload, truncating the
 * additional fields to fit the command's allocation length.
 *
 * Returns 0 on success, -1 when the payload is too small (< 6 bytes).
 */
static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
	struct se_lun *lun = SE_LUN(cmd);
	struct se_device *dev = SE_DEV(cmd);
	unsigned char *buf = cmd->t_task->t_task_buf;

	/*
	 * Make sure we at least have 6 bytes of INQUIRY response
	 * payload going back for EVPD=0
	 */
	if (cmd->data_length < 6) {
		printk(KERN_ERR "SCSI Inquiry payload length: %u"
			" too small for EVPD=0\n", cmd->data_length);
		return -1;
	}

	buf[0] = dev->transport->get_device_type(dev);
	if (buf[0] == TYPE_TAPE)
		buf[1] = 0x80;	/* RMB: removable medium */
	buf[2] = dev->transport->get_device_rev(dev);

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
		target_fill_alua_data(lun->lun_sep, buf);

	if (cmd->data_length < 8) {
		buf[4] = 1; /* Set additional length to 1 */
		return 0;
	}

	buf[7] = 0x32; /* Sync=1 and CmdQue=1 */

	/*
	 * Do not include vendor, product, revision info in INQUIRY
	 * response payload for cdbs with a small allocation length.
	 */
	if (cmd->data_length < 36) {
		buf[4] = 3; /* Set additional length to 3 */
		return 0;
	}

	snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
	snprintf((unsigned char *)&buf[16], 16, "%s",
		 &DEV_T10_WWN(dev)->model[0]);
	snprintf((unsigned char *)&buf[32], 4, "%s",
		 &DEV_T10_WWN(dev)->revision[0]);
	buf[4] = 31; /* Set additional length to 31 */
	return 0;
}
116
/* supported vital product data pages (VPD page 0x00) */
static int
target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	buf[1] = 0x00;
	if (cmd->data_length < 8)
		return 0;

	buf[4] = 0x0;
	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
			SDF_EMULATED_VPD_UNIT_SERIAL) {
		/* Advertise pages 0x80, 0x83 and 0x86 */
		buf[3] = 3;
		buf[5] = 0x80;
		buf[6] = 0x83;
		buf[7] = 0x86;
	}

	return 0;
}
141
/* unit serial number (VPD page 0x80) */
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = SE_DEV(cmd);
	u16 len = 0;

	buf[1] = 0x80;
	if (dev->se_sub_dev->su_dev_flags &
			SDF_EMULATED_VPD_UNIT_SERIAL) {
		u32 unit_serial_len;

		unit_serial_len =
			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		/* When the serial does not fit, only report the length in
		 * the page header without copying any data */
		if (((len + 4) + unit_serial_len) > cmd->data_length) {
			len += unit_serial_len;
			buf[2] = ((len >> 8) & 0xff);
			buf[3] = (len & 0xff);
			return 0;
		}
		len += sprintf((unsigned char *)&buf[4], "%s",
			&DEV_T10_WWN(dev)->unit_serial[0]);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}
171
/*
 * Device identification VPD (page 0x83); for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 *
 * Emits, in order: NAA IEEE Registered Extended, T10 Vendor ID,
 * relative target port, target port group, LU group, and SCSI name
 * string designators.  Descriptors that do not fit in the allocation
 * length are skipped but still counted into the PAGE LENGTH field.
 */
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_lun *lun = SE_LUN(cmd);
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char binary, binary_new;
	unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	int i;
	u16 len = 0, id_len;

	buf[1] = 0x83;
	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* Skip the 20-byte NAA designator when it cannot fit */
	if (off + 20 > cmd->data_length)
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 0)b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] = 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
	 */
	binary = transport_asciihex_to_binaryhex(
				&DEV_T10_WWN(dev)->unit_serial[0]);
	buf[off++] |= (binary & 0xf0) >> 4;
	/* Pack the ASCII-hex serial into nibble pairs */
	for (i = 0; i < 24; i += 2) {
		binary_new = transport_asciihex_to_binaryhex(
			&DEV_T10_WWN(dev)->unit_serial[i+2]);
		buf[off] = (binary & 0x0f) << 4;
		buf[off++] |= (binary_new & 0xf0) >> 4;
		binary = binary_new;
	}
	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->se_sub_dev->su_dev_flags &
			SDF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len =
			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		/* Too large to fit: only account for the length */
		if ((len + (id_len + 4) +
		    (prod_len + unit_serial_len)) >
				cmd->data_length) {
			len += (prod_len + unit_serial_len);
			goto check_port;
		}
		id_len += sprintf((unsigned char *)&buf[off+12],
				"%s:%s", prod,
				&DEV_T10_WWN(dev)->unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);
	/*
	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
	 */
check_port:
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = port->sep_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		if (((len + 4) + 8) > cmd->data_length) {
			len += 8;
			goto check_tpgi;
		}
		buf[off] =
			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
		buf[off++] = (port->sep_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_tpgi:
		if (T10_ALUA(dev->se_sub_dev)->alua_type !=
				SPC3_ALUA_EMULATED)
			goto check_scsi_name;

		if (((len + 4) + 8) > cmd->data_length) {
			len += 8;
			goto check_lu_gp;
		}
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (!tg_pt_gp_mem)
			goto check_lu_gp;

		/* Snapshot the group ID under the member lock */
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
		if (!(tg_pt_gp)) {
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		buf[off] =
			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		if (((len + 4) + 8) > cmd->data_length) {
			len += 8;
			goto check_scsi_name;
		}
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!(lu_gp_mem))
			goto check_scsi_name;

		/* Snapshot the LU group ID under the member lock */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!(lu_gp)) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
		scsi_name_len += 10;
		/* Check for 4-byte padding */
		padding = ((-scsi_name_len) & 3);
		if (padding != 0)
			scsi_name_len += padding;
		/* Header size + Designation descriptor */
		scsi_name_len += 4;

		if (((len + 4) + scsi_name_len) > cmd->data_length) {
			len += scsi_name_len;
			goto set_len;
		}
		buf[off] =
			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing, $FABRIC_MOD
		 * dependent information. For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
		 * UTF-8 encoding.
		 */
		tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		if (padding)
			scsi_name_len += padding;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);
	}
set_len:
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}
466
467/* Extended INQUIRY Data VPD Page */
468static int
469target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
470{
471 if (cmd->data_length < 60)
472 return 0;
473
474 buf[1] = 0x86;
475 buf[2] = 0x3c;
476 /* Set HEADSUP, ORDSUP, SIMPSUP */
477 buf[5] = 0x07;
478
479 /* If WriteCache emulation is enabled, set V_SUP */
480 if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
481 buf[6] = 0x01;
482 return 0;
483}
484
/*
 * Block Limits VPD page (0xb0), see sbc3r22 section 6.5.3.
 * Returns 0 on success, -1 when the initiator buffer cannot even hold
 * the non-thin-provisioning portion of the page.
 */
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = SE_DEV(cmd);
	int have_tp = 0;

	/*
	 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
	 * different page length for Thin Provisioning.
	 */
	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
		have_tp = 1;

	/* 0x10 bytes of page payload + 4 byte header is the bare minimum */
	if (cmd->data_length < (0x10 + 4)) {
		printk(KERN_INFO "Received data_length: %u"
			" too small for EVPD 0xb0\n",
			cmd->data_length);
		return -1;
	}

	/* Too small for the TP fields: silently fall back to the short page */
	if (have_tp && cmd->data_length < (0x3c + 4)) {
		printk(KERN_INFO "Received data_length: %u"
			" too small for TPE=1 EVPD 0xb0\n",
			cmd->data_length);
		have_tp = 0;
	}

	buf[0] = dev->transport->get_device_type(dev);
	buf[1] = 0xb0;
	buf[3] = have_tp ? 0x3c : 0x10;	/* PAGE LENGTH */

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 */
	put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP or the initiator sent a too
	 * short buffer.
	 */
	if (!have_tp || cmd->data_length < (0x3c + 4))
		return 0;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
			   &buf[32]);
	/* UGAVALID is bit 7 of byte 32, the MSB of the field just written */
	if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	return 0;
}
566
567/* Thin Provisioning VPD */
568static int
569target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
570{
571 struct se_device *dev = SE_DEV(cmd);
572
573 /*
574 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
575 *
576 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
577 * zero, then the page length shall be set to 0004h. If the DP bit
578 * is set to one, then the page length shall be set to the value
579 * defined in table 162.
580 */
581 buf[0] = dev->transport->get_device_type(dev);
582 buf[1] = 0xb2;
583
584 /*
585 * Set Hardcoded length mentioned above for DP=0
586 */
587 put_unaligned_be16(0x0004, &buf[2]);
588
589 /*
590 * The THRESHOLD EXPONENT field indicates the threshold set size in
591 * LBAs as a power of 2 (i.e., the threshold set size is equal to
592 * 2(threshold exponent)).
593 *
594 * Note that this is currently set to 0x00 as mkp says it will be
595 * changing again. We can enable this once it has settled in T10
596 * and is actually used by Linux/SCSI ML code.
597 */
598 buf[4] = 0x00;
599
600 /*
601 * A TPU bit set to one indicates that the device server supports
602 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
603 * that the device server does not support the UNMAP command.
604 */
605 if (DEV_ATTRIB(dev)->emulate_tpu != 0)
606 buf[5] = 0x80;
607
608 /*
609 * A TPWS bit set to one indicates that the device server supports
610 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
611 * A TPWS bit set to zero indicates that the device server does not
612 * support the use of the WRITE SAME (16) command to unmap LBAs.
613 */
614 if (DEV_ATTRIB(dev)->emulate_tpws != 0)
615 buf[5] |= 0x40;
616
617 return 0;
618}
619
620static int
621target_emulate_inquiry(struct se_cmd *cmd)
622{
623 struct se_device *dev = SE_DEV(cmd);
624 unsigned char *buf = cmd->t_task->t_task_buf;
625 unsigned char *cdb = cmd->t_task->t_task_cdb;
626
627 if (!(cdb[1] & 0x1))
628 return target_emulate_inquiry_std(cmd);
629
630 /*
631 * Make sure we at least have 4 bytes of INQUIRY response
632 * payload for 0x00 going back for EVPD=1. Note that 0x80
633 * and 0x83 will check for enough payload data length and
634 * jump to set_len: label when there is not enough inquiry EVPD
635 * payload length left for the next outgoing EVPD metadata
636 */
637 if (cmd->data_length < 4) {
638 printk(KERN_ERR "SCSI Inquiry payload length: %u"
639 " too small for EVPD=1\n", cmd->data_length);
640 return -1;
641 }
642 buf[0] = dev->transport->get_device_type(dev);
643
644 switch (cdb[2]) {
645 case 0x00:
646 return target_emulate_evpd_00(cmd, buf);
647 case 0x80:
648 return target_emulate_evpd_80(cmd, buf);
649 case 0x83:
650 return target_emulate_evpd_83(cmd, buf);
651 case 0x86:
652 return target_emulate_evpd_86(cmd, buf);
653 case 0xb0:
654 return target_emulate_evpd_b0(cmd, buf);
655 case 0xb2:
656 return target_emulate_evpd_b2(cmd, buf);
657 default:
658 printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
659 return -1;
660 }
661
662 return 0;
663}
664
665static int
666target_emulate_readcapacity(struct se_cmd *cmd)
667{
668 struct se_device *dev = SE_DEV(cmd);
669 unsigned char *buf = cmd->t_task->t_task_buf;
670 u32 blocks = dev->transport->get_blocks(dev);
671
672 buf[0] = (blocks >> 24) & 0xff;
673 buf[1] = (blocks >> 16) & 0xff;
674 buf[2] = (blocks >> 8) & 0xff;
675 buf[3] = blocks & 0xff;
676 buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
677 buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
678 buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
679 buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
680 /*
681 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
682 */
683 if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
684 put_unaligned_be32(0xFFFFFFFF, &buf[0]);
685
686 return 0;
687}
688
689static int
690target_emulate_readcapacity_16(struct se_cmd *cmd)
691{
692 struct se_device *dev = SE_DEV(cmd);
693 unsigned char *buf = cmd->t_task->t_task_buf;
694 unsigned long long blocks = dev->transport->get_blocks(dev);
695
696 buf[0] = (blocks >> 56) & 0xff;
697 buf[1] = (blocks >> 48) & 0xff;
698 buf[2] = (blocks >> 40) & 0xff;
699 buf[3] = (blocks >> 32) & 0xff;
700 buf[4] = (blocks >> 24) & 0xff;
701 buf[5] = (blocks >> 16) & 0xff;
702 buf[6] = (blocks >> 8) & 0xff;
703 buf[7] = blocks & 0xff;
704 buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
705 buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
706 buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
707 buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
708 /*
709 * Set Thin Provisioning Enable bit following sbc3r22 in section
710 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
711 */
712 if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
713 buf[14] = 0x80;
714
715 return 0;
716}
717
/*
 * Fill in the Read-Write Error Recovery mode page (0x01).  Only the
 * PAGE CODE and PAGE LENGTH bytes are written; the caller supplies a
 * zeroed buffer that provides the remaining default parameters.
 * Returns the total number of bytes the page occupies.
 */
static int
target_modesense_rwrecovery(unsigned char *p)
{
	p[0] = 0x01;	/* PAGE CODE */
	p[1] = 0x0a;	/* PAGE LENGTH: 0x0a parameter bytes follow */

	return 12;	/* 2 byte header + 10 parameter bytes */
}
726
/*
 * Fill in the Control mode page (0x0a) at *p and return its total size
 * (2 byte header + 0x0a parameter bytes = 12).
 */
static int
target_modesense_control(struct se_device *dev, unsigned char *p)
{
	p[0] = 0x0a;	/* PAGE CODE */
	p[1] = 0x0a;	/* PAGE LENGTH */
	p[2] = 2;	/* presumably GLTSD (spc4r17 byte 2, bit 1) -- confirm */
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b a The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * to the number of commands completed with one of those status codes.
	 */
	p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
	       (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
	p[8] = 0xff;	/* BUSY TIMEOUT PERIOD high byte -- 0xffff = unlimited */
	p[9] = 0xff;	/* BUSY TIMEOUT PERIOD low byte */
	p[11] = 30;	/* presumably EXTENDED SELF-TEST COMPLETION TIME -- confirm */

	return 12;
}
783
784static int
785target_modesense_caching(struct se_device *dev, unsigned char *p)
786{
787 p[0] = 0x08;
788 p[1] = 0x12;
789 if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
790 p[2] = 0x04; /* Write Cache Enable */
791 p[12] = 0x20; /* Disabled Read Ahead */
792
793 return 20;
794}
795
static void
target_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * The WP bit (bit 7) of the DEVICE-SPECIFIC PARAMETER byte lives
	 * at the same position for every device type handled here, so
	 * the type argument does not change the outcome.
	 */
	buf[0] |= 0x80; /* WP bit */
}
811
812static void
813target_modesense_dpofua(unsigned char *buf, int type)
814{
815 switch (type) {
816 case TYPE_DISK:
817 buf[0] |= 0x10; /* DPOFUA bit */
818 break;
819 default:
820 break;
821 }
822}
823
/*
 * Emulate MODE SENSE (6) and MODE SENSE (10); "ten" selects the 10-byte
 * variant with its 8 byte parameter header (versus 4 bytes for the 6-byte
 * CDB).  Pages are built in a local buffer and copied out, truncated to
 * the initiator's allocation length.
 */
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
	struct se_device *dev = SE_DEV(cmd);
	char *cdb = cmd->t_task->t_task_cdb;
	unsigned char *rbuf = cmd->t_task->t_task_buf;
	int type = dev->transport->get_device_type(dev);
	/* Parameter header: 8 bytes for MODE SENSE(10), 4 for MODE SENSE(6) */
	int offset = (ten) ? 8 : 4;
	int length = 0;
	unsigned char buf[SE_MODE_PAGE_BUF];

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/* PAGE CODE is the low six bits of CDB byte 2 */
	switch (cdb[2] & 0x3f) {
	case 0x01:
		length = target_modesense_rwrecovery(&buf[offset]);
		break;
	case 0x08:
		length = target_modesense_caching(dev, &buf[offset]);
		break;
	case 0x0a:
		length = target_modesense_control(dev, &buf[offset]);
		break;
	case 0x3f:
		/* 0x3f requests all supported pages, emitted back to back */
		length = target_modesense_rwrecovery(&buf[offset]);
		length += target_modesense_caching(dev, &buf[offset+length]);
		length += target_modesense_control(dev, &buf[offset+length]);
		break;
	default:
		printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
				cdb[2] & 0x3f);
		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
	}
	offset += length;

	if (ten) {
		/* MODE DATA LENGTH excludes its own two bytes */
		offset -= 2;
		buf[0] = (offset >> 8) & 0xff;
		buf[1] = offset & 0xff;

		/* Set WP in the DEVICE-SPECIFIC PARAMETER byte (byte 3) */
		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
		    (cmd->se_deve &&
		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
			target_modesense_write_protect(&buf[3], type);

		/* DPOFUA only when both WCE and FUA write emulation are on */
		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
			target_modesense_dpofua(&buf[3], type);

		/* Never copy out more than the initiator asked for */
		if ((offset + 2) > cmd->data_length)
			offset = cmd->data_length;

	} else {
		/* MODE DATA LENGTH excludes its own single byte */
		offset -= 1;
		buf[0] = offset & 0xff;

		/* DEVICE-SPECIFIC PARAMETER is byte 2 for the 6-byte header */
		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
		    (cmd->se_deve &&
		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
			target_modesense_write_protect(&buf[2], type);

		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
			target_modesense_dpofua(&buf[2], type);

		if ((offset + 1) > cmd->data_length)
			offset = cmd->data_length;
	}
	memcpy(rbuf, buf, offset);

	return 0;
}
896
/*
 * Emulate REQUEST SENSE by returning fixed-format sense data (0x70):
 * either a pending UNIT ATTENTION condition, or NO SENSE when no UA is
 * queued for this nexus.  Descriptor-format sense (DESC bit) is not
 * supported.
 */
static int
target_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task->t_task_cdb;
	unsigned char *buf = cmd->t_task->t_task_buf;
	u8 ua_asc = 0, ua_ascq = 0;

	/* DESC bit (CDB byte 1, bit 0) requests descriptor-format sense */
	if (cdb[1] & 0x01) {
		printk(KERN_ERR "REQUEST_SENSE description emulation not"
			" supported\n");
		return PYX_TRANSPORT_INVALID_CDB_FIELD;
	}
	/* Returns 0 when a UA existed and was consumed for this command */
	if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
		/*
		 * CURRENT ERROR, UNIT ATTENTION
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		/*
		 * Make sure request data length is enough for additional
		 * sense data.
		 */
		if (cmd->data_length <= 18) {
			buf[7] = 0x00;	/* ADDITIONAL SENSE LENGTH = 0 */
			return 0;
		}
		/*
		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
		 */
		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
		buf[7] = 0x0A;	/* ADDITIONAL SENSE LENGTH */
	} else {
		/*
		 * CURRENT ERROR, NO SENSE
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
		/*
		 * Make sure request data length is enough for additional
		 * sense data.
		 */
		if (cmd->data_length <= 18) {
			buf[7] = 0x00;
			return 0;
		}
		/*
		 * NO ADDITIONAL SENSE INFORMATION
		 * (ASCQ is deliberately left untouched on this path.)
		 */
		buf[SPC_ASC_KEY_OFFSET] = 0x00;
		buf[7] = 0x0A;
	}

	return 0;
}
952
953/*
954 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
955 * Note this is not used for TCM/pSCSI passthrough
956 */
957static int
958target_emulate_unmap(struct se_task *task)
959{
960 struct se_cmd *cmd = TASK_CMD(task);
961 struct se_device *dev = SE_DEV(cmd);
962 unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
963 unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
964 sector_t lba;
965 unsigned int size = cmd->data_length, range;
966 int ret, offset;
967 unsigned short dl, bd_dl;
968
969 /* First UNMAP block descriptor starts at 8 byte offset */
970 offset = 8;
971 size -= 8;
972 dl = get_unaligned_be16(&cdb[0]);
973 bd_dl = get_unaligned_be16(&cdb[2]);
974 ptr = &buf[offset];
975 printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
976 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
977
978 while (size) {
979 lba = get_unaligned_be64(&ptr[0]);
980 range = get_unaligned_be32(&ptr[8]);
981 printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
982 (unsigned long long)lba, range);
983
984 ret = dev->transport->do_discard(dev, lba, range);
985 if (ret < 0) {
986 printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
987 ret);
988 return -1;
989 }
990
991 ptr += 16;
992 size -= 16;
993 }
994
995 task->task_scsi_status = GOOD;
996 transport_complete_task(task, 1);
997 return 0;
998}
999
1000/*
1001 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
1002 * Note this is not used for TCM/pSCSI passthrough
1003 */
1004static int
1005target_emulate_write_same(struct se_task *task)
1006{
1007 struct se_cmd *cmd = TASK_CMD(task);
1008 struct se_device *dev = SE_DEV(cmd);
1009 sector_t lba = cmd->t_task->t_task_lba;
1010 unsigned int range;
1011 int ret;
1012
1013 range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
1014
1015 printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
1016 (unsigned long long)lba, range);
1017
1018 ret = dev->transport->do_discard(dev, lba, range);
1019 if (ret < 0) {
1020 printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
1021 return -1;
1022 }
1023
1024 task->task_scsi_status = GOOD;
1025 transport_complete_task(task, 1);
1026 return 0;
1027}
1028
/*
 * Entry point for emulating control-type SCSI CDBs that the backstore
 * does not execute natively.  Returns a PYX_TRANSPORT_* status code, or
 * propagates a negative value from an individual emulation handler.
 */
int
transport_emulate_control_cdb(struct se_task *task)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct se_device *dev = SE_DEV(cmd);
	unsigned short service_action;
	int ret = 0;

	switch (cmd->t_task->t_task_cdb[0]) {
	case INQUIRY:
		ret = target_emulate_inquiry(cmd);
		break;
	case READ_CAPACITY:
		ret = target_emulate_readcapacity(cmd);
		break;
	case MODE_SENSE:
		ret = target_emulate_modesense(cmd, 0);
		break;
	case MODE_SENSE_10:
		ret = target_emulate_modesense(cmd, 1);
		break;
	case SERVICE_ACTION_IN:
		/* SERVICE ACTION is the low five bits of CDB byte 1 */
		switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			ret = target_emulate_readcapacity_16(cmd);
			break;
		default:
			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
				cmd->t_task->t_task_cdb[1] & 0x1f);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		break;
	case REQUEST_SENSE:
		ret = target_emulate_request_sense(cmd);
		break;
	case UNMAP:
		/* Discard-based opcodes require a backstore discard hook */
		if (!dev->transport->do_discard) {
			printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
					dev->transport->name);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		ret = target_emulate_unmap(task);
		break;
	case WRITE_SAME_16:
		if (!dev->transport->do_discard) {
			printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
					" for: %s\n", dev->transport->name);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		ret = target_emulate_write_same(task);
		break;
	case VARIABLE_LENGTH_CMD:
		/* SERVICE ACTION for variable-length CDBs is in bytes 8-9 */
		service_action =
			get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
		switch (service_action) {
		case WRITE_SAME_32:
			if (!dev->transport->do_discard) {
				printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
					" supported for: %s\n",
					dev->transport->name);
				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			}
			ret = target_emulate_write_same(task);
			break;
		default:
			printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
					" 0x%02x\n", service_action);
			break;
		}
		break;
	case SYNCHRONIZE_CACHE:
	case 0x91: /* SYNCHRONIZE_CACHE_16: */
		if (!dev->transport->do_sync_cache) {
			printk(KERN_ERR
				"SYNCHRONIZE_CACHE emulation not supported"
				" for: %s\n", dev->transport->name);
			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
		dev->transport->do_sync_cache(task);
		break;
	/* Commands below are emulated as successful no-ops */
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		break;
	default:
		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
			cmd->t_task->t_task_cdb[0], dev->transport->name);
		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
	}

	if (ret < 0)
		return ret;
	/*
	 * NOTE(review): target_emulate_unmap()/_write_same() already set
	 * task_scsi_status and call transport_complete_task() on their
	 * success paths (and do_sync_cache() may as well), so for those
	 * opcodes the completion below appears to run a second time --
	 * confirm against transport_complete_task() semantics.
	 */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
new file mode 100644
index 000000000000..2764510798b0
--- /dev/null
+++ b/drivers/target/target_core_configfs.c
@@ -0,0 +1,3225 @@
1/*******************************************************************************
2 * Filename: target_core_configfs.c
3 *
4 * This file contains ConfigFS logic for the Generic Target Engine project.
5 *
6 * Copyright (c) 2008-2010 Rising Tide Systems
7 * Copyright (c) 2008-2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * based on configfs Copyright (C) 2005 Oracle. All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <generated/utsrelease.h>
28#include <linux/utsname.h>
29#include <linux/init.h>
30#include <linux/fs.h>
31#include <linux/namei.h>
32#include <linux/slab.h>
33#include <linux/types.h>
34#include <linux/delay.h>
35#include <linux/unistd.h>
36#include <linux/string.h>
37#include <linux/parser.h>
38#include <linux/syscalls.h>
39#include <linux/configfs.h>
40#include <linux/proc_fs.h>
41
42#include <target/target_core_base.h>
43#include <target/target_core_device.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46#include <target/target_core_fabric_configfs.h>
47#include <target/target_core_configfs.h>
48#include <target/configfs_macros.h>
49
50#include "target_core_alua.h"
51#include "target_core_hba.h"
52#include "target_core_pr.h"
53#include "target_core_rd.h"
54
/* Registered fabric modules; both protected by g_tf_lock */
static struct list_head g_tf_list;
static struct mutex g_tf_lock;

/*
 * Wraps a configfs_attribute together with show/store callbacks that
 * take an opaque context pointer rather than a config_item.
 */
struct target_core_configfs_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(void *, char *);
	ssize_t (*store)(void *, const char *, size_t);
};
63
64static inline struct se_hba *
65item_to_hba(struct config_item *item)
66{
67 return container_of(to_config_group(item), struct se_hba, hba_group);
68}
69
70/*
71 * Attributes for /sys/kernel/config/target/
72 */
/*
 * Read handler for the "version" attribute: reports the TCM ConfigFS
 * version together with the kernel sysname/machine and UTS_RELEASE.
 */
static ssize_t target_core_attr_show(struct config_item *item,
				      struct configfs_attribute *attr,
				      char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
		utsname()->sysname, utsname()->machine);
}
81
/* Item ops for the top-level target/ group: attribute show only */
static struct configfs_item_operations target_core_fabric_item_ops = {
	.show_attribute = target_core_attr_show,
};

/* Read-only "version" attribute at /sys/kernel/config/target/version */
static struct configfs_attribute target_core_item_attr_version = {
	.ca_owner = THIS_MODULE,
	.ca_name = "version",
	.ca_mode = S_IRUGO,
};
91
92static struct target_fabric_configfs *target_core_get_fabric(
93 const char *name)
94{
95 struct target_fabric_configfs *tf;
96
97 if (!(name))
98 return NULL;
99
100 mutex_lock(&g_tf_lock);
101 list_for_each_entry(tf, &g_tf_list, tf_list) {
102 if (!(strcmp(tf->tf_name, name))) {
103 atomic_inc(&tf->tf_access_cnt);
104 mutex_unlock(&g_tf_lock);
105 return tf;
106 }
107 }
108 mutex_unlock(&g_tf_lock);
109
110 return NULL;
111}
112
113/*
114 * Called from struct target_core_group_ops->make_group()
115 */
/*
 * make_group() callback for /sys/kernel/config/target/: mkdir of a
 * fabric name auto-loads known fabric modules, locates the registered
 * target_fabric_configfs and wires up its default configfs groups.
 * Returns the fabric's config_group or ERR_PTR(-EINVAL).
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
		" %s\n", group, name);
	/*
	 * Ensure that TCM subsystem plugins are loaded at this point for
	 * using the RAMDISK_DR virtual LUN 0 and all other struct se_port
	 * LUN symlinks.
	 */
	if (transport_subsystem_check_init() < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * Below are some hardcoded request_module() calls to automatically
	 * local fabric modules when the following is called:
	 *
	 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
	 *
	 * Note that this does not limit which TCM fabric module can be
	 * registered, but simply provids auto loading logic for modules with
	 * mkdir(2) system calls with known TCM fabric modules.
	 */
	if (!(strncmp(name, "iscsi", 5))) {
		/*
		 * Automatically load the LIO Target fabric module when the
		 * following is called:
		 *
		 * mkdir -p $CONFIGFS/target/iscsi
		 */
		ret = request_module("iscsi_target_mod");
		if (ret < 0) {
			printk(KERN_ERR "request_module() failed for"
				" iscsi_target_mod.ko: %d\n", ret);
			return ERR_PTR(-EINVAL);
		}
	} else if (!(strncmp(name, "loopback", 8))) {
		/*
		 * Automatically load the tcm_loop fabric module when the
		 * following is called:
		 *
		 * mkdir -p $CONFIGFS/target/loopback
		 */
		ret = request_module("tcm_loop");
		if (ret < 0) {
			printk(KERN_ERR "request_module() failed for"
				" tcm_loop.ko: %d\n", ret);
			return ERR_PTR(-EINVAL);
		}
	}

	/* Also fails for a loaded module that never registered itself */
	tf = target_core_get_fabric(name);
	if (!(tf)) {
		printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
			name);
		return ERR_PTR(-EINVAL);
	}
	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
		" %s\n", tf->tf_name);
	/*
	 * On a successful target_core_get_fabric() look, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&TF_CIT_TMPL(tf)->tfc_wwn_cit);

	/* Publish the discovery_auth child group under the fabric dir */
	tf->tf_group.default_groups = tf->tf_default_groups;
	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
	tf->tf_group.default_groups[1] = NULL;

	config_group_init_type_name(&tf->tf_group, name,
			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&TF_CIT_TMPL(tf)->tfc_discovery_cit);

	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
			" %s\n", tf->tf_group.cg_item.ci_name);
	/*
	 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
	 */
	tf->tf_ops.tf_subsys = tf->tf_subsys;
	tf->tf_fabric = &tf->tf_group.cg_item;
	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
			" for %s\n", name);

	return &tf->tf_group;
}
207
208/*
209 * Called from struct target_core_group_ops->drop_item()
210 */
/*
 * drop_item() callback for /sys/kernel/config/target/: rmdir of a
 * fabric dir drops the access reference taken by
 * target_core_register_fabric() and releases the default child groups
 * plus the fabric config_item itself.
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);
	struct config_group *tf_group;
	struct config_item *df_item;
	int i;

	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
			" %s\n", tf->tf_name);
	/* Balances the atomic_inc() from target_core_get_fabric() */
	atomic_dec(&tf->tf_access_cnt);

	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
			" tf->tf_fabric for %s\n", tf->tf_name);
	tf->tf_fabric = NULL;

	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
			" %s\n", config_item_name(item));

	/* Drop each default child group (discovery_auth) before the item */
	tf_group = &tf->tf_group;
	for (i = 0; tf_group->default_groups[i]; i++) {
		df_item = &tf_group->default_groups[i]->cg_item;
		tf_group->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	/* The tf memory itself is freed via target_fabric_configfs_free() --
	 * presumably by the fabric module on unload; confirm at call sites. */
	config_item_put(item);
}
243
/* mkdir/rmdir handlers for fabric dirs under /sys/kernel/config/target/ */
static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static struct config_item_type target_core_fabrics_item = {
	.ct_item_ops	= &target_core_fabric_item_ops,
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

/* Root configfs subsystem registered as /sys/kernel/config/target/ */
static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};

/* NULL-terminated list handed to fabric modules via tf_subsys */
static struct configfs_subsystem *target_core_subsystem[] = {
	&target_core_fabrics,
	NULL,
};
280
281/*##############################################################################
282// Start functions called by external Target Fabrics Modules
283//############################################################################*/
284
285/*
286 * First function called by fabric modules to:
287 *
288 * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
289 * 2) Add struct target_fabric_configfs to g_tf_list
290 * 3) Return struct target_fabric_configfs to fabric module to be passed
291 * into target_fabric_configfs_register().
292 */
struct target_fabric_configfs *target_fabric_configfs_init(
	struct module *fabric_mod,
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!(fabric_mod)) {
		printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
		return NULL;
	}
	if (!(name)) {
		printk(KERN_ERR "Unable to locate passed fabric name\n");
		return NULL;
	}
	/*
	 * NOTE(review): a name of exactly TARGET_FABRIC_NAME_SIZE bytes
	 * passes this check but gets truncated by one byte by the
	 * snprintf() below -- ">=" would reject it outright; confirm
	 * intended behavior.
	 */
	if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
		printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
			"_NAME_SIZE\n", name);
		return NULL;
	}

	/*
	 * NOTE(review): the parameter checks above return NULL while the
	 * allocation failure below returns ERR_PTR(-ENOMEM); callers must
	 * handle both conventions -- confirm at the call sites.
	 */
	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!(tf))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	/*
	 * Setup the default generic struct config_item_type's (cits) in
	 * struct target_fabric_configfs->tf_cit_tmpl
	 */
	tf->tf_module = fabric_mod;
	target_fabric_setup_cits(tf);

	tf->tf_subsys = target_core_subsystem[0];
	snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);

	/* Make the new fabric visible to target_core_get_fabric() */
	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
			">>>>>>>>>>>>>>\n");
	printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
			" %s\n", tf, tf->tf_name);
	return tf;
}
EXPORT_SYMBOL(target_fabric_configfs_init);
340
341/*
342 * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
343 */
void target_fabric_configfs_free(
	struct target_fabric_configfs *tf)
{
	/* Unhash from the global fabric list before releasing the memory */
	mutex_lock(&g_tf_lock);
	list_del(&tf->tf_list);
	mutex_unlock(&g_tf_lock);

	/* NOTE(review): tf_access_cnt is not checked here -- presumably the
	 * caller guarantees no outstanding references; confirm. */
	kfree(tf);
}
EXPORT_SYMBOL(target_fabric_configfs_free);
354
355/*
356 * Perform a sanity check of the passed tf->tf_ops before completing
357 * TCM fabric module registration.
358 */
359static int target_fabric_tf_ops_check(
360 struct target_fabric_configfs *tf)
361{
362 struct target_core_fabric_ops *tfo = &tf->tf_ops;
363
364 if (!(tfo->get_fabric_name)) {
365 printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
366 return -EINVAL;
367 }
368 if (!(tfo->get_fabric_proto_ident)) {
369 printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
370 return -EINVAL;
371 }
372 if (!(tfo->tpg_get_wwn)) {
373 printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
374 return -EINVAL;
375 }
376 if (!(tfo->tpg_get_tag)) {
377 printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
378 return -EINVAL;
379 }
380 if (!(tfo->tpg_get_default_depth)) {
381 printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
382 return -EINVAL;
383 }
384 if (!(tfo->tpg_get_pr_transport_id)) {
385 printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
386 return -EINVAL;
387 }
388 if (!(tfo->tpg_get_pr_transport_id_len)) {
389 printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
390 return -EINVAL;
391 }
392 if (!(tfo->tpg_check_demo_mode)) {
393 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
394 return -EINVAL;
395 }
396 if (!(tfo->tpg_check_demo_mode_cache)) {
397 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
398 return -EINVAL;
399 }
400 if (!(tfo->tpg_check_demo_mode_write_protect)) {
401 printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
402 return -EINVAL;
403 }
404 if (!(tfo->tpg_check_prod_mode_write_protect)) {
405 printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
406 return -EINVAL;
407 }
408 if (!(tfo->tpg_alloc_fabric_acl)) {
409 printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
410 return -EINVAL;
411 }
412 if (!(tfo->tpg_release_fabric_acl)) {
413 printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
414 return -EINVAL;
415 }
416 if (!(tfo->tpg_get_inst_index)) {
417 printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
418 return -EINVAL;
419 }
420 if (!(tfo->release_cmd_to_pool)) {
421 printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
422 return -EINVAL;
423 }
424 if (!(tfo->release_cmd_direct)) {
425 printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
426 return -EINVAL;
427 }
428 if (!(tfo->shutdown_session)) {
429 printk(KERN_ERR "Missing tfo->shutdown_session()\n");
430 return -EINVAL;
431 }
432 if (!(tfo->close_session)) {
433 printk(KERN_ERR "Missing tfo->close_session()\n");
434 return -EINVAL;
435 }
436 if (!(tfo->stop_session)) {
437 printk(KERN_ERR "Missing tfo->stop_session()\n");
438 return -EINVAL;
439 }
440 if (!(tfo->fall_back_to_erl0)) {
441 printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
442 return -EINVAL;
443 }
444 if (!(tfo->sess_logged_in)) {
445 printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
446 return -EINVAL;
447 }
448 if (!(tfo->sess_get_index)) {
449 printk(KERN_ERR "Missing tfo->sess_get_index()\n");
450 return -EINVAL;
451 }
452 if (!(tfo->write_pending)) {
453 printk(KERN_ERR "Missing tfo->write_pending()\n");
454 return -EINVAL;
455 }
456 if (!(tfo->write_pending_status)) {
457 printk(KERN_ERR "Missing tfo->write_pending_status()\n");
458 return -EINVAL;
459 }
460 if (!(tfo->set_default_node_attributes)) {
461 printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
462 return -EINVAL;
463 }
464 if (!(tfo->get_task_tag)) {
465 printk(KERN_ERR "Missing tfo->get_task_tag()\n");
466 return -EINVAL;
467 }
468 if (!(tfo->get_cmd_state)) {
469 printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
470 return -EINVAL;
471 }
472 if (!(tfo->new_cmd_failure)) {
473 printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
474 return -EINVAL;
475 }
476 if (!(tfo->queue_data_in)) {
477 printk(KERN_ERR "Missing tfo->queue_data_in()\n");
478 return -EINVAL;
479 }
480 if (!(tfo->queue_status)) {
481 printk(KERN_ERR "Missing tfo->queue_status()\n");
482 return -EINVAL;
483 }
484 if (!(tfo->queue_tm_rsp)) {
485 printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
486 return -EINVAL;
487 }
488 if (!(tfo->set_fabric_sense_len)) {
489 printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
490 return -EINVAL;
491 }
492 if (!(tfo->get_fabric_sense_len)) {
493 printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
494 return -EINVAL;
495 }
496 if (!(tfo->is_state_remove)) {
497 printk(KERN_ERR "Missing tfo->is_state_remove()\n");
498 return -EINVAL;
499 }
500 if (!(tfo->pack_lun)) {
501 printk(KERN_ERR "Missing tfo->pack_lun()\n");
502 return -EINVAL;
503 }
504 /*
505 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
506 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
507 * target_core_fabric_configfs.c WWN+TPG group context code.
508 */
509 if (!(tfo->fabric_make_wwn)) {
510 printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
511 return -EINVAL;
512 }
513 if (!(tfo->fabric_drop_wwn)) {
514 printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
515 return -EINVAL;
516 }
517 if (!(tfo->fabric_make_tpg)) {
518 printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
519 return -EINVAL;
520 }
521 if (!(tfo->fabric_drop_tpg)) {
522 printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
523 return -EINVAL;
524 }
525
526 return 0;
527}
528
529/*
530 * Called 2nd from fabric module with returned parameter of
531 * struct target_fabric_configfs * from target_fabric_configfs_init().
532 *
533 * Upon a successful registration, the new fabric's struct config_item is
534 * return. Also, a pointer to this struct is set in the passed
535 * struct target_fabric_configfs.
536 */
537int target_fabric_configfs_register(
538 struct target_fabric_configfs *tf)
539{
540 struct config_group *su_group;
541 int ret;
542
543 if (!(tf)) {
544 printk(KERN_ERR "Unable to locate target_fabric_configfs"
545 " pointer\n");
546 return -EINVAL;
547 }
548 if (!(tf->tf_subsys)) {
549 printk(KERN_ERR "Unable to target struct config_subsystem"
550 " pointer\n");
551 return -EINVAL;
552 }
553 su_group = &tf->tf_subsys->su_group;
554 if (!(su_group)) {
555 printk(KERN_ERR "Unable to locate target struct config_group"
556 " pointer\n");
557 return -EINVAL;
558 }
559 ret = target_fabric_tf_ops_check(tf);
560 if (ret < 0)
561 return ret;
562
563 printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
564 ">>>>>>>>>>\n");
565 return 0;
566}
567EXPORT_SYMBOL(target_fabric_configfs_register);
568
569void target_fabric_configfs_deregister(
570 struct target_fabric_configfs *tf)
571{
572 struct config_group *su_group;
573 struct configfs_subsystem *su;
574
575 if (!(tf)) {
576 printk(KERN_ERR "Unable to locate passed target_fabric_"
577 "configfs\n");
578 return;
579 }
580 su = tf->tf_subsys;
581 if (!(su)) {
582 printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
583 " pointer\n");
584 return;
585 }
586 su_group = &tf->tf_subsys->su_group;
587 if (!(su_group)) {
588 printk(KERN_ERR "Unable to locate target struct config_group"
589 " pointer\n");
590 return;
591 }
592
593 printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
594 ">>>>>>>>>>>>\n");
595 mutex_lock(&g_tf_lock);
596 if (atomic_read(&tf->tf_access_cnt)) {
597 mutex_unlock(&g_tf_lock);
598 printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
599 tf->tf_name);
600 BUG();
601 }
602 list_del(&tf->tf_list);
603 mutex_unlock(&g_tf_lock);
604
605 printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
606 " %s\n", tf->tf_name);
607 tf->tf_module = NULL;
608 tf->tf_subsys = NULL;
609 kfree(tf);
610
611 printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
612 ">>>>>\n");
613 return;
614}
615EXPORT_SYMBOL(target_fabric_configfs_deregister);
616
617/*##############################################################################
618// Stop functions called by external Target Fabrics Modules
619//############################################################################*/
620
621/* Start functions for struct config_item_type target_core_dev_attrib_cit */
622
623#define DEF_DEV_ATTRIB_SHOW(_name) \
624static ssize_t target_core_dev_show_attr_##_name( \
625 struct se_dev_attrib *da, \
626 char *page) \
627{ \
628 struct se_device *dev; \
629 struct se_subsystem_dev *se_dev = da->da_sub_dev; \
630 ssize_t rb; \
631 \
632 spin_lock(&se_dev->se_dev_lock); \
633 dev = se_dev->se_dev_ptr; \
634 if (!(dev)) { \
635 spin_unlock(&se_dev->se_dev_lock); \
636 return -ENODEV; \
637 } \
638 rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
639 spin_unlock(&se_dev->se_dev_lock); \
640 \
641 return rb; \
642}
643
/*
 * Generate the configfs store handler for device attribute _name.  The
 * user string is parsed as an unsigned long and handed to the matching
 * se_dev_set_<name>() setter under se_dev_lock; -ENODEV when no backing
 * device exists, -EINVAL on parse or setter failure.
 *
 * NOTE(review): strict_strtoul() is the era-appropriate API here but was
 * later deprecated in favor of kstrtoul() — candidate for modernization
 * when the rest of the file moves.
 */
#define DEF_DEV_ATTRIB_STORE(_name)					\
static ssize_t target_core_dev_store_attr_##_name(			\
	struct se_dev_attrib *da,					\
	const char *page,						\
	size_t count)							\
{									\
	struct se_device *dev;						\
	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
	unsigned long val;						\
	int ret;							\
									\
	spin_lock(&se_dev->se_dev_lock);				\
	dev = se_dev->se_dev_ptr;					\
	if (!(dev)) {							\
		spin_unlock(&se_dev->se_dev_lock);			\
		return -ENODEV;						\
	}								\
	ret = strict_strtoul(page, 0, &val);				\
	if (ret < 0) {							\
		spin_unlock(&se_dev->se_dev_lock);			\
		printk(KERN_ERR "strict_strtoul() failed with"		\
			" ret: %d\n", ret);				\
		return -EINVAL;						\
	}								\
	ret = se_dev_set_##_name(dev, (u32)val);			\
	spin_unlock(&se_dev->se_dev_lock);				\
									\
	return (!ret) ? count : -EINVAL;				\
}
673
/* Emit both show and store handlers for a read-write device attribute. */
#define DEF_DEV_ATTRIB(_name) \
DEF_DEV_ATTRIB_SHOW(_name); \
DEF_DEV_ATTRIB_STORE(_name);

/* Emit only the show handler for a read-only device attribute. */
#define DEF_DEV_ATTRIB_RO(_name) \
DEF_DEV_ATTRIB_SHOW(_name);

CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
/* Declare the configfs attribute object wiring show+store handlers. */
#define SE_DEV_ATTR(_name, _mode)					\
static struct target_core_dev_attrib_attribute			\
			target_core_dev_attrib_##_name =		\
		__CONFIGFS_EATTR(_name, _mode,				\
		target_core_dev_show_attr_##_name,			\
		target_core_dev_store_attr_##_name);

/*
 * Declare a read-only configfs attribute object.  The stray semicolon
 * after the parameter list in the original define injected a spurious
 * ';' at the head of every expansion; removed.
 */
#define SE_DEV_ATTR_RO(_name)						\
static struct target_core_dev_attrib_attribute			\
			target_core_dev_attrib_##_name =		\
		__CONFIGFS_EATTR_RO(_name,				\
		target_core_dev_show_attr_##_name);
694
/*
 * Instantiate handler pairs and configfs attribute objects for every
 * device attribute; *_RO variants expose hardware-reported values that
 * userspace may not change.
 */
DEF_DEV_ATTRIB(emulate_dpo);
SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_fua_write);
SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_fua_read);
SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_write_cache);
SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_tas);
SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_tpu);
SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_tpws);
SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB_RO(hw_block_size);
SE_DEV_ATTR_RO(hw_block_size);

DEF_DEV_ATTRIB(block_size);
SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB_RO(hw_max_sectors);
SE_DEV_ATTR_RO(hw_max_sectors);

DEF_DEV_ATTRIB(max_sectors);
SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(optimal_sectors);
SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB_RO(hw_queue_depth);
SE_DEV_ATTR_RO(hw_queue_depth);

DEF_DEV_ATTRIB(queue_depth);
SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(task_timeout);
SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(max_unmap_lba_count);
SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(max_unmap_block_desc_count);
SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(unmap_granularity);
SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(unmap_granularity_alignment);
SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
757
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);

/* NULL-terminated table consumed by configfs via target_core_dev_attrib_cit. */
static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
	&target_core_dev_attrib_emulate_dpo.attr,
	&target_core_dev_attrib_emulate_fua_write.attr,
	&target_core_dev_attrib_emulate_fua_read.attr,
	&target_core_dev_attrib_emulate_write_cache.attr,
	&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&target_core_dev_attrib_emulate_tas.attr,
	&target_core_dev_attrib_emulate_tpu.attr,
	&target_core_dev_attrib_emulate_tpws.attr,
	&target_core_dev_attrib_enforce_pr_isids.attr,
	&target_core_dev_attrib_hw_block_size.attr,
	&target_core_dev_attrib_block_size.attr,
	&target_core_dev_attrib_hw_max_sectors.attr,
	&target_core_dev_attrib_max_sectors.attr,
	&target_core_dev_attrib_optimal_sectors.attr,
	&target_core_dev_attrib_hw_queue_depth.attr,
	&target_core_dev_attrib_queue_depth.attr,
	&target_core_dev_attrib_task_timeout.attr,
	&target_core_dev_attrib_max_unmap_lba_count.attr,
	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
	&target_core_dev_attrib_unmap_granularity.attr,
	&target_core_dev_attrib_unmap_granularity_alignment.attr,
	NULL,
};

/* Generated show/store dispatchers from CONFIGFS_EATTR_OPS() above. */
static struct configfs_item_operations target_core_dev_attrib_ops = {
	.show_attribute		= target_core_dev_attrib_attr_show,
	.store_attribute	= target_core_dev_attrib_attr_store,
};

static struct config_item_type target_core_dev_attrib_cit = {
	.ct_item_ops		= &target_core_dev_attrib_ops,
	.ct_attrs		= target_core_dev_attrib_attrs,
	.ct_owner		= THIS_MODULE,
};
795
796/* End functions for struct config_item_type target_core_dev_attrib_cit */
797
798/* Start functions for struct config_item_type target_core_dev_wwn_cit */
799
CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
/* Declare a read-write WWN configfs attribute object. */
#define SE_DEV_WWN_ATTR(_name, _mode)					\
static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
		__CONFIGFS_EATTR(_name, _mode,				\
		target_core_dev_wwn_show_attr_##_name,			\
		target_core_dev_wwn_store_attr_##_name);

/*
 * Declare a read-only WWN configfs attribute object.  The original
 * wrapped the static declaration in do { } while (0), which is a syntax
 * error at file scope and would make the attribute block-local (and
 * hence unreferencable) at function scope; declare it at the invocation
 * site like the other *_ATTR_RO() helpers instead.
 */
#define SE_DEV_WWN_ATTR_RO(_name)					\
static struct target_core_dev_wwn_attribute				\
		target_core_dev_wwn_##_name =				\
		__CONFIGFS_EATTR_RO(_name,				\
		target_core_dev_wwn_show_attr_##_name);
814
815/*
816 * VPD page 0x80 Unit serial
817 */
818static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
819 struct t10_wwn *t10_wwn,
820 char *page)
821{
822 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
823 struct se_device *dev;
824
825 dev = se_dev->se_dev_ptr;
826 if (!(dev))
827 return -ENODEV;
828
829 return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
830 &t10_wwn->unit_serial[0]);
831}
832
833static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
834 struct t10_wwn *t10_wwn,
835 const char *page,
836 size_t count)
837{
838 struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
839 struct se_device *dev;
840 unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
841
842 /*
843 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
844 * from the struct scsi_device level firmware, do not allow
845 * VPD Unit Serial to be emulated.
846 *
847 * Note this struct scsi_device could also be emulating VPD
848 * information from its drivers/scsi LLD. But for now we assume
849 * it is doing 'the right thing' wrt a world wide unique
850 * VPD Unit Serial Number that OS dependent multipath can depend on.
851 */
852 if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
853 printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
854 " Unit Serial, ignoring request\n");
855 return -EOPNOTSUPP;
856 }
857
858 if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
859 printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
860 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
861 return -EOVERFLOW;
862 }
863 /*
864 * Check to see if any active $FABRIC_MOD exports exist. If they
865 * do exist, fail here as changing this information on the fly
866 * (underneath the initiator side OS dependent multipath code)
867 * could cause negative effects.
868 */
869 dev = su_dev->se_dev_ptr;
870 if ((dev)) {
871 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
872 printk(KERN_ERR "Unable to set VPD Unit Serial while"
873 " active %d $FABRIC_MOD exports exist\n",
874 atomic_read(&dev->dev_export_obj.obj_access_count));
875 return -EINVAL;
876 }
877 }
878 /*
879 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
880 *
881 * Also, strip any newline added from the userspace
882 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
883 */
884 memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
885 snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
886 snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
887 "%s", strstrip(buf));
888 su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
889
890 printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
891 " %s\n", su_dev->t10_wwn.unit_serial);
892
893 return count;
894}
895
896SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
897
898/*
899 * VPD page 0x83 Protocol Identifier
900 */
901static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
902 struct t10_wwn *t10_wwn,
903 char *page)
904{
905 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
906 struct se_device *dev;
907 struct t10_vpd *vpd;
908 unsigned char buf[VPD_TMP_BUF_SIZE];
909 ssize_t len = 0;
910
911 dev = se_dev->se_dev_ptr;
912 if (!(dev))
913 return -ENODEV;
914
915 memset(buf, 0, VPD_TMP_BUF_SIZE);
916
917 spin_lock(&t10_wwn->t10_vpd_lock);
918 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
919 if (!(vpd->protocol_identifier_set))
920 continue;
921
922 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
923
924 if ((len + strlen(buf) > PAGE_SIZE))
925 break;
926
927 len += sprintf(page+len, "%s", buf);
928 }
929 spin_unlock(&t10_wwn->t10_vpd_lock);
930
931 return len;
932}
933
934static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
935 struct t10_wwn *t10_wwn,
936 const char *page,
937 size_t count)
938{
939 return -ENOSYS;
940}
941
942SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
943
944/*
945 * Generic wrapper for dumping VPD identifiers by association.
946 */
947#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
948static ssize_t target_core_dev_wwn_show_attr_##_name( \
949 struct t10_wwn *t10_wwn, \
950 char *page) \
951{ \
952 struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
953 struct se_device *dev; \
954 struct t10_vpd *vpd; \
955 unsigned char buf[VPD_TMP_BUF_SIZE]; \
956 ssize_t len = 0; \
957 \
958 dev = se_dev->se_dev_ptr; \
959 if (!(dev)) \
960 return -ENODEV; \
961 \
962 spin_lock(&t10_wwn->t10_vpd_lock); \
963 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
964 if (vpd->association != _assoc) \
965 continue; \
966 \
967 memset(buf, 0, VPD_TMP_BUF_SIZE); \
968 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
969 if ((len + strlen(buf) > PAGE_SIZE)) \
970 break; \
971 len += sprintf(page+len, "%s", buf); \
972 \
973 memset(buf, 0, VPD_TMP_BUF_SIZE); \
974 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
975 if ((len + strlen(buf) > PAGE_SIZE)) \
976 break; \
977 len += sprintf(page+len, "%s", buf); \
978 \
979 memset(buf, 0, VPD_TMP_BUF_SIZE); \
980 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
981 if ((len + strlen(buf) > PAGE_SIZE)) \
982 break; \
983 len += sprintf(page+len, "%s", buf); \
984 } \
985 spin_unlock(&t10_wwn->t10_vpd_lock); \
986 \
987 return len; \
988}
989
990/*
991 * VPD page 0x83 Assoication: Logical Unit
992 */
993DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
994
995static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
996 struct t10_wwn *t10_wwn,
997 const char *page,
998 size_t count)
999{
1000 return -ENOSYS;
1001}
1002
1003SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
1004
1005/*
1006 * VPD page 0x83 Association: Target Port
1007 */
1008DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
1009
1010static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
1011 struct t10_wwn *t10_wwn,
1012 const char *page,
1013 size_t count)
1014{
1015 return -ENOSYS;
1016}
1017
1018SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
1019
1020/*
1021 * VPD page 0x83 Association: SCSI Target Device
1022 */
1023DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
1024
1025static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
1026 struct t10_wwn *t10_wwn,
1027 const char *page,
1028 size_t count)
1029{
1030 return -ENOSYS;
1031}
1032
1033SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
1034
CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);

/* NULL-terminated attribute table for the wwn/ configfs group. */
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_core_dev_wwn_vpd_unit_serial.attr,
	&target_core_dev_wwn_vpd_protocol_identifier.attr,
	&target_core_dev_wwn_vpd_assoc_logical_unit.attr,
	&target_core_dev_wwn_vpd_assoc_target_port.attr,
	&target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
	NULL,
};

/* Generated show/store dispatchers from CONFIGFS_EATTR_OPS() above. */
static struct configfs_item_operations target_core_dev_wwn_ops = {
	.show_attribute		= target_core_dev_wwn_attr_show,
	.store_attribute	= target_core_dev_wwn_attr_store,
};

static struct config_item_type target_core_dev_wwn_cit = {
	.ct_item_ops		= &target_core_dev_wwn_ops,
	.ct_attrs		= target_core_dev_wwn_attrs,
	.ct_owner		= THIS_MODULE,
};
1056
1057/* End functions for struct config_item_type target_core_dev_wwn_cit */
1058
1059/* Start functions for struct config_item_type target_core_dev_pr_cit */
1060
CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
/* Declare a read-write persistent-reservation configfs attribute object. */
#define SE_DEV_PR_ATTR(_name, _mode)					\
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	target_core_dev_pr_show_attr_##_name,				\
	target_core_dev_pr_store_attr_##_name);

/*
 * Declare a read-only persistent-reservation configfs attribute object.
 * The stray semicolon after the parameter list in the original define
 * injected a spurious ';' at the head of every expansion; removed.
 */
#define SE_DEV_PR_ATTR_RO(_name)					\
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
	target_core_dev_pr_show_attr_##_name);
1072
1073/*
1074 * res_holder
1075 */
1076static ssize_t target_core_dev_pr_show_spc3_res(
1077 struct se_device *dev,
1078 char *page,
1079 ssize_t *len)
1080{
1081 struct se_node_acl *se_nacl;
1082 struct t10_pr_registration *pr_reg;
1083 char i_buf[PR_REG_ISID_ID_LEN];
1084 int prf_isid;
1085
1086 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1087
1088 spin_lock(&dev->dev_reservation_lock);
1089 pr_reg = dev->dev_pr_res_holder;
1090 if (!(pr_reg)) {
1091 *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
1092 spin_unlock(&dev->dev_reservation_lock);
1093 return *len;
1094 }
1095 se_nacl = pr_reg->pr_reg_nacl;
1096 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1097 PR_REG_ISID_ID_LEN);
1098
1099 *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
1100 TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
1101 se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
1102 spin_unlock(&dev->dev_reservation_lock);
1103
1104 return *len;
1105}
1106
1107static ssize_t target_core_dev_pr_show_spc2_res(
1108 struct se_device *dev,
1109 char *page,
1110 ssize_t *len)
1111{
1112 struct se_node_acl *se_nacl;
1113
1114 spin_lock(&dev->dev_reservation_lock);
1115 se_nacl = dev->dev_reserved_node_acl;
1116 if (!(se_nacl)) {
1117 *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
1118 spin_unlock(&dev->dev_reservation_lock);
1119 return *len;
1120 }
1121 *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
1122 TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
1123 se_nacl->initiatorname);
1124 spin_unlock(&dev->dev_reservation_lock);
1125
1126 return *len;
1127}
1128
1129static ssize_t target_core_dev_pr_show_attr_res_holder(
1130 struct se_subsystem_dev *su_dev,
1131 char *page)
1132{
1133 ssize_t len = 0;
1134
1135 if (!(su_dev->se_dev_ptr))
1136 return -ENODEV;
1137
1138 switch (T10_RES(su_dev)->res_type) {
1139 case SPC3_PERSISTENT_RESERVATIONS:
1140 target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
1141 page, &len);
1142 break;
1143 case SPC2_RESERVATIONS:
1144 target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
1145 page, &len);
1146 break;
1147 case SPC_PASSTHROUGH:
1148 len += sprintf(page+len, "Passthrough\n");
1149 break;
1150 default:
1151 len += sprintf(page+len, "Unknown\n");
1152 break;
1153 }
1154
1155 return len;
1156}
1157
1158SE_DEV_PR_ATTR_RO(res_holder);
1159
1160/*
1161 * res_pr_all_tgt_pts
1162 */
1163static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
1164 struct se_subsystem_dev *su_dev,
1165 char *page)
1166{
1167 struct se_device *dev;
1168 struct t10_pr_registration *pr_reg;
1169 ssize_t len = 0;
1170
1171 dev = su_dev->se_dev_ptr;
1172 if (!(dev))
1173 return -ENODEV;
1174
1175 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1176 return len;
1177
1178 spin_lock(&dev->dev_reservation_lock);
1179 pr_reg = dev->dev_pr_res_holder;
1180 if (!(pr_reg)) {
1181 len = sprintf(page, "No SPC-3 Reservation holder\n");
1182 spin_unlock(&dev->dev_reservation_lock);
1183 return len;
1184 }
1185 /*
1186 * See All Target Ports (ALL_TG_PT) bit in spcr17, section 6.14.3
1187 * Basic PERSISTENT RESERVER OUT parameter list, page 290
1188 */
1189 if (pr_reg->pr_reg_all_tg_pt)
1190 len = sprintf(page, "SPC-3 Reservation: All Target"
1191 " Ports registration\n");
1192 else
1193 len = sprintf(page, "SPC-3 Reservation: Single"
1194 " Target Port registration\n");
1195 spin_unlock(&dev->dev_reservation_lock);
1196
1197 return len;
1198}
1199
1200SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
1201
1202/*
1203 * res_pr_generation
1204 */
1205static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
1206 struct se_subsystem_dev *su_dev,
1207 char *page)
1208{
1209 if (!(su_dev->se_dev_ptr))
1210 return -ENODEV;
1211
1212 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1213 return 0;
1214
1215 return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
1216}
1217
1218SE_DEV_PR_ATTR_RO(res_pr_generation);
1219
1220/*
1221 * res_pr_holder_tg_port
1222 */
1223static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1224 struct se_subsystem_dev *su_dev,
1225 char *page)
1226{
1227 struct se_device *dev;
1228 struct se_node_acl *se_nacl;
1229 struct se_lun *lun;
1230 struct se_portal_group *se_tpg;
1231 struct t10_pr_registration *pr_reg;
1232 struct target_core_fabric_ops *tfo;
1233 ssize_t len = 0;
1234
1235 dev = su_dev->se_dev_ptr;
1236 if (!(dev))
1237 return -ENODEV;
1238
1239 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1240 return len;
1241
1242 spin_lock(&dev->dev_reservation_lock);
1243 pr_reg = dev->dev_pr_res_holder;
1244 if (!(pr_reg)) {
1245 len = sprintf(page, "No SPC-3 Reservation holder\n");
1246 spin_unlock(&dev->dev_reservation_lock);
1247 return len;
1248 }
1249 se_nacl = pr_reg->pr_reg_nacl;
1250 se_tpg = se_nacl->se_tpg;
1251 lun = pr_reg->pr_reg_tg_pt_lun;
1252 tfo = TPG_TFO(se_tpg);
1253
1254 len += sprintf(page+len, "SPC-3 Reservation: %s"
1255 " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
1256 tfo->tpg_get_wwn(se_tpg));
1257 len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
1258 " Identifer Tag: %hu %s Portal Group Tag: %hu"
1259 " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
1260 tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
1261 tfo->get_fabric_name(), lun->unpacked_lun);
1262 spin_unlock(&dev->dev_reservation_lock);
1263
1264 return len;
1265}
1266
1267SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
1268
1269/*
1270 * res_pr_registered_i_pts
1271 */
1272static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1273 struct se_subsystem_dev *su_dev,
1274 char *page)
1275{
1276 struct target_core_fabric_ops *tfo;
1277 struct t10_pr_registration *pr_reg;
1278 unsigned char buf[384];
1279 char i_buf[PR_REG_ISID_ID_LEN];
1280 ssize_t len = 0;
1281 int reg_count = 0, prf_isid;
1282
1283 if (!(su_dev->se_dev_ptr))
1284 return -ENODEV;
1285
1286 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1287 return len;
1288
1289 len += sprintf(page+len, "SPC-3 PR Registrations:\n");
1290
1291 spin_lock(&T10_RES(su_dev)->registration_lock);
1292 list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
1293 pr_reg_list) {
1294
1295 memset(buf, 0, 384);
1296 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1297 tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1298 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1299 PR_REG_ISID_ID_LEN);
1300 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
1301 tfo->get_fabric_name(),
1302 pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
1303 &i_buf[0] : "", pr_reg->pr_res_key,
1304 pr_reg->pr_res_generation);
1305
1306 if ((len + strlen(buf) > PAGE_SIZE))
1307 break;
1308
1309 len += sprintf(page+len, "%s", buf);
1310 reg_count++;
1311 }
1312 spin_unlock(&T10_RES(su_dev)->registration_lock);
1313
1314 if (!(reg_count))
1315 len += sprintf(page+len, "None\n");
1316
1317 return len;
1318}
1319
1320SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
1321
1322/*
1323 * res_pr_type
1324 */
1325static ssize_t target_core_dev_pr_show_attr_res_pr_type(
1326 struct se_subsystem_dev *su_dev,
1327 char *page)
1328{
1329 struct se_device *dev;
1330 struct t10_pr_registration *pr_reg;
1331 ssize_t len = 0;
1332
1333 dev = su_dev->se_dev_ptr;
1334 if (!(dev))
1335 return -ENODEV;
1336
1337 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1338 return len;
1339
1340 spin_lock(&dev->dev_reservation_lock);
1341 pr_reg = dev->dev_pr_res_holder;
1342 if (!(pr_reg)) {
1343 len = sprintf(page, "No SPC-3 Reservation holder\n");
1344 spin_unlock(&dev->dev_reservation_lock);
1345 return len;
1346 }
1347 len = sprintf(page, "SPC-3 Reservation Type: %s\n",
1348 core_scsi3_pr_dump_type(pr_reg->pr_res_type));
1349 spin_unlock(&dev->dev_reservation_lock);
1350
1351 return len;
1352}
1353
1354SE_DEV_PR_ATTR_RO(res_pr_type);
1355
1356/*
1357 * res_type
1358 */
1359static ssize_t target_core_dev_pr_show_attr_res_type(
1360 struct se_subsystem_dev *su_dev,
1361 char *page)
1362{
1363 ssize_t len = 0;
1364
1365 if (!(su_dev->se_dev_ptr))
1366 return -ENODEV;
1367
1368 switch (T10_RES(su_dev)->res_type) {
1369 case SPC3_PERSISTENT_RESERVATIONS:
1370 len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
1371 break;
1372 case SPC2_RESERVATIONS:
1373 len = sprintf(page, "SPC2_RESERVATIONS\n");
1374 break;
1375 case SPC_PASSTHROUGH:
1376 len = sprintf(page, "SPC_PASSTHROUGH\n");
1377 break;
1378 default:
1379 len = sprintf(page, "UNKNOWN\n");
1380 break;
1381 }
1382
1383 return len;
1384}
1385
1386SE_DEV_PR_ATTR_RO(res_type);
1387
1388/*
1389 * res_aptpl_active
1390 */
1391
1392static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
1393 struct se_subsystem_dev *su_dev,
1394 char *page)
1395{
1396 if (!(su_dev->se_dev_ptr))
1397 return -ENODEV;
1398
1399 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1400 return 0;
1401
1402 return sprintf(page, "APTPL Bit Status: %s\n",
1403 (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
1404}
1405
1406SE_DEV_PR_ATTR_RO(res_aptpl_active);
1407
1408/*
1409 * res_aptpl_metadata
1410 */
1411static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
1412 struct se_subsystem_dev *su_dev,
1413 char *page)
1414{
1415 if (!(su_dev->se_dev_ptr))
1416 return -ENODEV;
1417
1418 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1419 return 0;
1420
1421 return sprintf(page, "Ready to process PR APTPL metadata..\n");
1422}
1423
/* Token identifiers for the res_aptpl_metadata key=value option parser. */
enum {
	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
1430
/*
 * match_token() patterns for res_aptpl_metadata input, one comma-separated
 * key=value option per entry.  %s options are duplicated with match_strdup(),
 * %d options are parsed with match_int().
 */
static match_table_t tokens = {
	{Opt_initiator_fabric, "initiator_fabric=%s"},
	{Opt_initiator_node, "initiator_node=%s"},
	{Opt_initiator_sid, "initiator_sid=%s"},
	{Opt_sa_res_key, "sa_res_key=%s"},
	{Opt_res_holder, "res_holder=%d"},
	{Opt_res_type, "res_type=%d"},
	{Opt_res_scope, "res_scope=%d"},
	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
	{Opt_mapped_lun, "mapped_lun=%d"},
	{Opt_target_fabric, "target_fabric=%s"},
	{Opt_target_node, "target_node=%s"},
	{Opt_tpgt, "tpgt=%d"},
	{Opt_port_rtpi, "port_rtpi=%d"},
	{Opt_target_lun, "target_lun=%d"},
	{Opt_err, NULL}
};
1448
1449static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1450 struct se_subsystem_dev *su_dev,
1451 const char *page,
1452 size_t count)
1453{
1454 struct se_device *dev;
1455 unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
1456 unsigned char *isid = NULL;
1457 char *orig, *ptr, *arg_p, *opts;
1458 substring_t args[MAX_OPT_ARGS];
1459 unsigned long long tmp_ll;
1460 u64 sa_res_key = 0;
1461 u32 mapped_lun = 0, target_lun = 0;
1462 int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
1463 u16 port_rpti = 0, tpgt = 0;
1464 u8 type = 0, scope;
1465
1466 dev = su_dev->se_dev_ptr;
1467 if (!(dev))
1468 return -ENODEV;
1469
1470 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
1471 return 0;
1472
1473 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1474 printk(KERN_INFO "Unable to process APTPL metadata while"
1475 " active fabric exports exist\n");
1476 return -EINVAL;
1477 }
1478
1479 opts = kstrdup(page, GFP_KERNEL);
1480 if (!opts)
1481 return -ENOMEM;
1482
1483 orig = opts;
1484 while ((ptr = strsep(&opts, ",")) != NULL) {
1485 if (!*ptr)
1486 continue;
1487
1488 token = match_token(ptr, tokens, args);
1489 switch (token) {
1490 case Opt_initiator_fabric:
1491 i_fabric = match_strdup(&args[0]);
1492 break;
1493 case Opt_initiator_node:
1494 i_port = match_strdup(&args[0]);
1495 if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
1496 printk(KERN_ERR "APTPL metadata initiator_node="
1497 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
1498 PR_APTPL_MAX_IPORT_LEN);
1499 ret = -EINVAL;
1500 break;
1501 }
1502 break;
1503 case Opt_initiator_sid:
1504 isid = match_strdup(&args[0]);
1505 if (strlen(isid) > PR_REG_ISID_LEN) {
1506 printk(KERN_ERR "APTPL metadata initiator_isid"
1507 "= exceeds PR_REG_ISID_LEN: %d\n",
1508 PR_REG_ISID_LEN);
1509 ret = -EINVAL;
1510 break;
1511 }
1512 break;
1513 case Opt_sa_res_key:
1514 arg_p = match_strdup(&args[0]);
1515 ret = strict_strtoull(arg_p, 0, &tmp_ll);
1516 if (ret < 0) {
1517 printk(KERN_ERR "strict_strtoull() failed for"
1518 " sa_res_key=\n");
1519 goto out;
1520 }
1521 sa_res_key = (u64)tmp_ll;
1522 break;
1523 /*
1524 * PR APTPL Metadata for Reservation
1525 */
1526 case Opt_res_holder:
1527 match_int(args, &arg);
1528 res_holder = arg;
1529 break;
1530 case Opt_res_type:
1531 match_int(args, &arg);
1532 type = (u8)arg;
1533 break;
1534 case Opt_res_scope:
1535 match_int(args, &arg);
1536 scope = (u8)arg;
1537 break;
1538 case Opt_res_all_tg_pt:
1539 match_int(args, &arg);
1540 all_tg_pt = (int)arg;
1541 break;
1542 case Opt_mapped_lun:
1543 match_int(args, &arg);
1544 mapped_lun = (u32)arg;
1545 break;
1546 /*
1547 * PR APTPL Metadata for Target Port
1548 */
1549 case Opt_target_fabric:
1550 t_fabric = match_strdup(&args[0]);
1551 break;
1552 case Opt_target_node:
1553 t_port = match_strdup(&args[0]);
1554 if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
1555 printk(KERN_ERR "APTPL metadata target_node="
1556 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
1557 PR_APTPL_MAX_TPORT_LEN);
1558 ret = -EINVAL;
1559 break;
1560 }
1561 break;
1562 case Opt_tpgt:
1563 match_int(args, &arg);
1564 tpgt = (u16)arg;
1565 break;
1566 case Opt_port_rtpi:
1567 match_int(args, &arg);
1568 port_rpti = (u16)arg;
1569 break;
1570 case Opt_target_lun:
1571 match_int(args, &arg);
1572 target_lun = (u32)arg;
1573 break;
1574 default:
1575 break;
1576 }
1577 }
1578
1579 if (!(i_port) || !(t_port) || !(sa_res_key)) {
1580 printk(KERN_ERR "Illegal parameters for APTPL registration\n");
1581 ret = -EINVAL;
1582 goto out;
1583 }
1584
1585 if (res_holder && !(type)) {
1586 printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
1587 " holder\n", type);
1588 ret = -EINVAL;
1589 goto out;
1590 }
1591
1592 ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
1593 i_port, isid, mapped_lun, t_port, tpgt, target_lun,
1594 res_holder, all_tg_pt, type);
1595out:
1596 kfree(orig);
1597 return (ret == 0) ? count : ret;
1598}
1599
1600SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
1601
/* Generate target_core_dev_pr_attr_show()/_store() dispatch helpers. */
CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);

/* configfs attributes exposed under each device's pr/ group. */
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
	&target_core_dev_pr_res_holder.attr,
	&target_core_dev_pr_res_pr_all_tgt_pts.attr,
	&target_core_dev_pr_res_pr_generation.attr,
	&target_core_dev_pr_res_pr_holder_tg_port.attr,
	&target_core_dev_pr_res_pr_registered_i_pts.attr,
	&target_core_dev_pr_res_pr_type.attr,
	&target_core_dev_pr_res_type.attr,
	&target_core_dev_pr_res_aptpl_active.attr,
	&target_core_dev_pr_res_aptpl_metadata.attr,
	NULL,
};

static struct configfs_item_operations target_core_dev_pr_ops = {
	.show_attribute		= target_core_dev_pr_attr_show,
	.store_attribute	= target_core_dev_pr_attr_store,
};

/* Item type bound to the pr/ configfs group. */
static struct config_item_type target_core_dev_pr_cit = {
	.ct_item_ops		= &target_core_dev_pr_ops,
	.ct_attrs		= target_core_dev_pr_attrs,
	.ct_owner		= THIS_MODULE,
};
1627
1628/* End functions for struct config_item_type target_core_dev_pr_cit */
1629
1630/* Start functions for struct config_item_type target_core_dev_cit */
1631
1632static ssize_t target_core_show_dev_info(void *p, char *page)
1633{
1634 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1635 struct se_hba *hba = se_dev->se_dev_hba;
1636 struct se_subsystem_api *t = hba->transport;
1637 int bl = 0;
1638 ssize_t read_bytes = 0;
1639
1640 if (!(se_dev->se_dev_ptr))
1641 return -ENODEV;
1642
1643 transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
1644 read_bytes += bl;
1645 read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
1646 return read_bytes;
1647}
1648
/* Read-only "info" attribute: dumps device state via target_core_show_dev_info(). */
static struct target_core_configfs_attribute target_core_attr_dev_info = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "info",
		    .ca_mode = S_IRUGO },
	.show	= target_core_show_dev_info,
	.store	= NULL,
};
1656
1657static ssize_t target_core_store_dev_control(
1658 void *p,
1659 const char *page,
1660 size_t count)
1661{
1662 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1663 struct se_hba *hba = se_dev->se_dev_hba;
1664 struct se_subsystem_api *t = hba->transport;
1665
1666 if (!(se_dev->se_dev_su_ptr)) {
1667 printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se"
1668 "_dev_su_ptr\n");
1669 return -EINVAL;
1670 }
1671
1672 return t->set_configfs_dev_params(hba, se_dev, page, count);
1673}
1674
/* Write-only "control" attribute: forwards backend parameter strings. */
static struct target_core_configfs_attribute target_core_attr_dev_control = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "control",
		    .ca_mode = S_IWUSR },
	.show	= NULL,
	.store	= target_core_store_dev_control,
};
1682
1683static ssize_t target_core_show_dev_alias(void *p, char *page)
1684{
1685 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1686
1687 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
1688 return 0;
1689
1690 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
1691}
1692
1693static ssize_t target_core_store_dev_alias(
1694 void *p,
1695 const char *page,
1696 size_t count)
1697{
1698 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1699 struct se_hba *hba = se_dev->se_dev_hba;
1700 ssize_t read_bytes;
1701
1702 if (count > (SE_DEV_ALIAS_LEN-1)) {
1703 printk(KERN_ERR "alias count: %d exceeds"
1704 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
1705 SE_DEV_ALIAS_LEN-1);
1706 return -EINVAL;
1707 }
1708
1709 se_dev->su_dev_flags |= SDF_USING_ALIAS;
1710 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
1711 "%s", page);
1712
1713 printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
1714 config_item_name(&hba->hba_group.cg_item),
1715 config_item_name(&se_dev->se_dev_group.cg_item),
1716 se_dev->se_dev_alias);
1717
1718 return read_bytes;
1719}
1720
/* Read/write "alias" attribute for the device group. */
static struct target_core_configfs_attribute target_core_attr_dev_alias = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "alias",
		    .ca_mode =  S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_alias,
	.store	= target_core_store_dev_alias,
};
1728
1729static ssize_t target_core_show_dev_udev_path(void *p, char *page)
1730{
1731 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1732
1733 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
1734 return 0;
1735
1736 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
1737}
1738
1739static ssize_t target_core_store_dev_udev_path(
1740 void *p,
1741 const char *page,
1742 size_t count)
1743{
1744 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1745 struct se_hba *hba = se_dev->se_dev_hba;
1746 ssize_t read_bytes;
1747
1748 if (count > (SE_UDEV_PATH_LEN-1)) {
1749 printk(KERN_ERR "udev_path count: %d exceeds"
1750 " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
1751 SE_UDEV_PATH_LEN-1);
1752 return -EINVAL;
1753 }
1754
1755 se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
1756 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
1757 "%s", page);
1758
1759 printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1760 config_item_name(&hba->hba_group.cg_item),
1761 config_item_name(&se_dev->se_dev_group.cg_item),
1762 se_dev->se_dev_udev_path);
1763
1764 return read_bytes;
1765}
1766
/* Read/write "udev_path" attribute for the device group. */
static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "udev_path",
		    .ca_mode =  S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_udev_path,
	.store	= target_core_store_dev_udev_path,
};
1774
1775static ssize_t target_core_store_dev_enable(
1776 void *p,
1777 const char *page,
1778 size_t count)
1779{
1780 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1781 struct se_device *dev;
1782 struct se_hba *hba = se_dev->se_dev_hba;
1783 struct se_subsystem_api *t = hba->transport;
1784 char *ptr;
1785
1786 ptr = strstr(page, "1");
1787 if (!(ptr)) {
1788 printk(KERN_ERR "For dev_enable ops, only valid value"
1789 " is \"1\"\n");
1790 return -EINVAL;
1791 }
1792 if ((se_dev->se_dev_ptr)) {
1793 printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
1794 " object\n");
1795 return -EEXIST;
1796 }
1797
1798 if (t->check_configfs_dev_params(hba, se_dev) < 0)
1799 return -EINVAL;
1800
1801 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1802 if (!(dev) || IS_ERR(dev))
1803 return -EINVAL;
1804
1805 se_dev->se_dev_ptr = dev;
1806 printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
1807 " %p\n", se_dev->se_dev_ptr);
1808
1809 return count;
1810}
1811
/* Write-only "enable" attribute: triggers backend device creation. */
static struct target_core_configfs_attribute target_core_attr_dev_enable = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "enable",
		    .ca_mode = S_IWUSR },
	.show	= NULL,
	.store	= target_core_store_dev_enable,
};
1819
1820static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1821{
1822 struct se_device *dev;
1823 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
1824 struct config_item *lu_ci;
1825 struct t10_alua_lu_gp *lu_gp;
1826 struct t10_alua_lu_gp_member *lu_gp_mem;
1827 ssize_t len = 0;
1828
1829 dev = su_dev->se_dev_ptr;
1830 if (!(dev))
1831 return -ENODEV;
1832
1833 if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
1834 return len;
1835
1836 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1837 if (!(lu_gp_mem)) {
1838 printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
1839 " pointer\n");
1840 return -EINVAL;
1841 }
1842
1843 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1844 lu_gp = lu_gp_mem->lu_gp;
1845 if ((lu_gp)) {
1846 lu_ci = &lu_gp->lu_gp_group.cg_item;
1847 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
1848 config_item_name(lu_ci), lu_gp->lu_gp_id);
1849 }
1850 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1851
1852 return len;
1853}
1854
1855static ssize_t target_core_store_alua_lu_gp(
1856 void *p,
1857 const char *page,
1858 size_t count)
1859{
1860 struct se_device *dev;
1861 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
1862 struct se_hba *hba = su_dev->se_dev_hba;
1863 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1864 struct t10_alua_lu_gp_member *lu_gp_mem;
1865 unsigned char buf[LU_GROUP_NAME_BUF];
1866 int move = 0;
1867
1868 dev = su_dev->se_dev_ptr;
1869 if (!(dev))
1870 return -ENODEV;
1871
1872 if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
1873 printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
1874 config_item_name(&hba->hba_group.cg_item),
1875 config_item_name(&su_dev->se_dev_group.cg_item));
1876 return -EINVAL;
1877 }
1878 if (count > LU_GROUP_NAME_BUF) {
1879 printk(KERN_ERR "ALUA LU Group Alias too large!\n");
1880 return -EINVAL;
1881 }
1882 memset(buf, 0, LU_GROUP_NAME_BUF);
1883 memcpy(buf, page, count);
1884 /*
1885 * Any ALUA logical unit alias besides "NULL" means we will be
1886 * making a new group association.
1887 */
1888 if (strcmp(strstrip(buf), "NULL")) {
1889 /*
1890 * core_alua_get_lu_gp_by_name() will increment reference to
1891 * struct t10_alua_lu_gp. This reference is released with
1892 * core_alua_get_lu_gp_by_name below().
1893 */
1894 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
1895 if (!(lu_gp_new))
1896 return -ENODEV;
1897 }
1898 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1899 if (!(lu_gp_mem)) {
1900 if (lu_gp_new)
1901 core_alua_put_lu_gp_from_name(lu_gp_new);
1902 printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
1903 " pointer\n");
1904 return -EINVAL;
1905 }
1906
1907 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1908 lu_gp = lu_gp_mem->lu_gp;
1909 if ((lu_gp)) {
1910 /*
1911 * Clearing an existing lu_gp association, and replacing
1912 * with NULL
1913 */
1914 if (!(lu_gp_new)) {
1915 printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
1916 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
1917 " %hu\n",
1918 config_item_name(&hba->hba_group.cg_item),
1919 config_item_name(&su_dev->se_dev_group.cg_item),
1920 config_item_name(&lu_gp->lu_gp_group.cg_item),
1921 lu_gp->lu_gp_id);
1922
1923 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1924 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1925
1926 return count;
1927 }
1928 /*
1929 * Removing existing association of lu_gp_mem with lu_gp
1930 */
1931 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1932 move = 1;
1933 }
1934 /*
1935 * Associate lu_gp_mem with lu_gp_new.
1936 */
1937 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
1938 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1939
1940 printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
1941 " core/alua/lu_gps/%s, ID: %hu\n",
1942 (move) ? "Moving" : "Adding",
1943 config_item_name(&hba->hba_group.cg_item),
1944 config_item_name(&su_dev->se_dev_group.cg_item),
1945 config_item_name(&lu_gp_new->lu_gp_group.cg_item),
1946 lu_gp_new->lu_gp_id);
1947
1948 core_alua_put_lu_gp_from_name(lu_gp_new);
1949 return count;
1950}
1951
/* Read/write "alua_lu_gp" attribute: LU group membership control. */
static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "alua_lu_gp",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_alua_lu_gp,
	.store	= target_core_store_alua_lu_gp,
};

/* All attributes exposed in each device's top-level configfs group. */
static struct configfs_attribute *lio_core_dev_attrs[] = {
	&target_core_attr_dev_info.attr,
	&target_core_attr_dev_control.attr,
	&target_core_attr_dev_alias.attr,
	&target_core_attr_dev_udev_path.attr,
	&target_core_attr_dev_enable.attr,
	&target_core_attr_dev_alua_lu_gp.attr,
	NULL,
};
1969
1970static void target_core_dev_release(struct config_item *item)
1971{
1972 struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
1973 struct se_subsystem_dev, se_dev_group);
1974 struct config_group *dev_cg;
1975
1976 if (!(se_dev))
1977 return;
1978
1979 dev_cg = &se_dev->se_dev_group;
1980 kfree(dev_cg->default_groups);
1981}
1982
1983static ssize_t target_core_dev_show(struct config_item *item,
1984 struct configfs_attribute *attr,
1985 char *page)
1986{
1987 struct se_subsystem_dev *se_dev = container_of(
1988 to_config_group(item), struct se_subsystem_dev,
1989 se_dev_group);
1990 struct target_core_configfs_attribute *tc_attr = container_of(
1991 attr, struct target_core_configfs_attribute, attr);
1992
1993 if (!(tc_attr->show))
1994 return -EINVAL;
1995
1996 return tc_attr->show((void *)se_dev, page);
1997}
1998
1999static ssize_t target_core_dev_store(struct config_item *item,
2000 struct configfs_attribute *attr,
2001 const char *page, size_t count)
2002{
2003 struct se_subsystem_dev *se_dev = container_of(
2004 to_config_group(item), struct se_subsystem_dev,
2005 se_dev_group);
2006 struct target_core_configfs_attribute *tc_attr = container_of(
2007 attr, struct target_core_configfs_attribute, attr);
2008
2009 if (!(tc_attr->store))
2010 return -EINVAL;
2011
2012 return tc_attr->store((void *)se_dev, page, count);
2013}
2014
static struct configfs_item_operations target_core_dev_item_ops = {
	.release		= target_core_dev_release,
	.show_attribute		= target_core_dev_show,
	.store_attribute	= target_core_dev_store,
};

/* Item type for the per-device configfs group. */
static struct config_item_type target_core_dev_cit = {
	.ct_item_ops		= &target_core_dev_item_ops,
	.ct_attrs		= lio_core_dev_attrs,
	.ct_owner		= THIS_MODULE,
};
2026
2027/* End functions for struct config_item_type target_core_dev_cit */
2028
2029/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
2030
/* Attribute container + helper macros for ALUA LU group attributes. */
CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
/* Define a read/write LU group attribute with _mode permissions. */
#define SE_DEV_ALUA_LU_ATTR(_name, _mode)				\
static struct target_core_alua_lu_gp_attribute				\
			target_core_alua_lu_gp_##_name =		\
		__CONFIGFS_EATTR(_name, _mode,				\
		target_core_alua_lu_gp_show_attr_##_name,		\
		target_core_alua_lu_gp_store_attr_##_name);

/* Define a read-only LU group attribute. */
#define SE_DEV_ALUA_LU_ATTR_RO(_name)					\
static struct target_core_alua_lu_gp_attribute				\
			target_core_alua_lu_gp_##_name =		\
		__CONFIGFS_EATTR_RO(_name,				\
		target_core_alua_lu_gp_show_attr_##_name);
2044
2045/*
2046 * lu_gp_id
2047 */
2048static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
2049 struct t10_alua_lu_gp *lu_gp,
2050 char *page)
2051{
2052 if (!(lu_gp->lu_gp_valid_id))
2053 return 0;
2054
2055 return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
2056}
2057
2058static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
2059 struct t10_alua_lu_gp *lu_gp,
2060 const char *page,
2061 size_t count)
2062{
2063 struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
2064 unsigned long lu_gp_id;
2065 int ret;
2066
2067 ret = strict_strtoul(page, 0, &lu_gp_id);
2068 if (ret < 0) {
2069 printk(KERN_ERR "strict_strtoul() returned %d for"
2070 " lu_gp_id\n", ret);
2071 return -EINVAL;
2072 }
2073 if (lu_gp_id > 0x0000ffff) {
2074 printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
2075 " 0x0000ffff\n", lu_gp_id);
2076 return -EINVAL;
2077 }
2078
2079 ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
2080 if (ret < 0)
2081 return -EINVAL;
2082
2083 printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
2084 " Group: core/alua/lu_gps/%s to ID: %hu\n",
2085 config_item_name(&alua_lu_gp_cg->cg_item),
2086 lu_gp->lu_gp_id);
2087
2088 return count;
2089}
2090
2091SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
2092
2093/*
2094 * members
2095 */
/*
 * List every hba/device member of this LU group, one "%s/%s\n" entry
 * per line, bounded by PAGE_SIZE.
 *
 * NOTE(review): cur_len includes the NUL terminator and each entry is
 * memcpy'd NUL and all, so the returned buffer carries an embedded NUL
 * after every entry and len counts them — presumably tolerated by
 * userspace readers; verify before changing.
 */
static ssize_t target_core_alua_lu_gp_show_attr_members(
	struct t10_alua_lu_gp *lu_gp,
	char *page)
{
	struct se_device *dev;
	struct se_hba *hba;
	struct se_subsystem_dev *su_dev;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[LU_GROUP_NAME_BUF];

	memset(buf, 0, LU_GROUP_NAME_BUF);

	/* lu_gp_lock guards the member list during traversal. */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		su_dev = dev->se_sub_dev;
		hba = su_dev->se_dev_hba;

		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
			config_item_name(&hba->hba_group.cg_item),
			config_item_name(&su_dev->se_dev_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		/* Stop (truncate) rather than overflow the configfs page. */
		if ((cur_len + len) > PAGE_SIZE) {
			printk(KERN_WARNING "Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	return len;
}

SE_DEV_ALUA_LU_ATTR_RO(members);
2134
/* Generate show/store dispatchers for LU group attributes. */
CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);

/* Attributes of each core/alua/lu_gps/<name> group. */
static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
	&target_core_alua_lu_gp_lu_gp_id.attr,
	&target_core_alua_lu_gp_members.attr,
	NULL,
};

static struct configfs_item_operations target_core_alua_lu_gp_ops = {
	.show_attribute		= target_core_alua_lu_gp_attr_show,
	.store_attribute	= target_core_alua_lu_gp_attr_store,
};

/* Item type bound to individual LU group directories. */
static struct config_item_type target_core_alua_lu_gp_cit = {
	.ct_item_ops		= &target_core_alua_lu_gp_ops,
	.ct_attrs		= target_core_alua_lu_gp_attrs,
	.ct_owner		= THIS_MODULE,
};
2153
2154/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
2155
2156/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
2157
2158static struct config_group *target_core_alua_create_lu_gp(
2159 struct config_group *group,
2160 const char *name)
2161{
2162 struct t10_alua_lu_gp *lu_gp;
2163 struct config_group *alua_lu_gp_cg = NULL;
2164 struct config_item *alua_lu_gp_ci = NULL;
2165
2166 lu_gp = core_alua_allocate_lu_gp(name, 0);
2167 if (IS_ERR(lu_gp))
2168 return NULL;
2169
2170 alua_lu_gp_cg = &lu_gp->lu_gp_group;
2171 alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
2172
2173 config_group_init_type_name(alua_lu_gp_cg, name,
2174 &target_core_alua_lu_gp_cit);
2175
2176 printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
2177 " Group: core/alua/lu_gps/%s\n",
2178 config_item_name(alua_lu_gp_ci));
2179
2180 return alua_lu_gp_cg;
2181
2182}
2183
/*
 * configfs rmdir handler for core/alua/lu_gps/<name>: drop the configfs
 * reference, then free the LU group and its memberships.
 */
static void target_core_alua_drop_lu_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s, ID: %hu\n",
		config_item_name(item), lu_gp->lu_gp_id);

	/*
	 * Release ordering is deliberate: put the configfs item first,
	 * then tear down the group itself via core_alua_free_lu_gp().
	 */
	config_item_put(item);
	core_alua_free_lu_gp(lu_gp);
}
2198
/* mkdir/rmdir operations for the core/alua/lu_gps/ directory. */
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
	.make_group		= &target_core_alua_create_lu_gp,
	.drop_item		= &target_core_alua_drop_lu_gp,
};

static struct config_item_type target_core_alua_lu_gps_cit = {
	.ct_item_ops		= NULL,
	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};
2209
2210/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
2211
2212/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2213
/* Attribute container + helper macros for ALUA target port group attrs. */
CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
/* Define a read/write target port group attribute with _mode permissions. */
#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode)				\
static struct target_core_alua_tg_pt_gp_attribute			\
			target_core_alua_tg_pt_gp_##_name =		\
		__CONFIGFS_EATTR(_name, _mode,				\
		target_core_alua_tg_pt_gp_show_attr_##_name,		\
		target_core_alua_tg_pt_gp_store_attr_##_name);

/* Define a read-only target port group attribute. */
#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name)				\
static struct target_core_alua_tg_pt_gp_attribute			\
			target_core_alua_tg_pt_gp_##_name =		\
		__CONFIGFS_EATTR_RO(_name,				\
		target_core_alua_tg_pt_gp_show_attr_##_name);
2227
2228/*
2229 * alua_access_state
2230 */
2231static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
2232 struct t10_alua_tg_pt_gp *tg_pt_gp,
2233 char *page)
2234{
2235 return sprintf(page, "%d\n",
2236 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
2237}
2238
2239static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2240 struct t10_alua_tg_pt_gp *tg_pt_gp,
2241 const char *page,
2242 size_t count)
2243{
2244 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
2245 unsigned long tmp;
2246 int new_state, ret;
2247
2248 if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
2249 printk(KERN_ERR "Unable to do implict ALUA on non valid"
2250 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2251 return -EINVAL;
2252 }
2253
2254 ret = strict_strtoul(page, 0, &tmp);
2255 if (ret < 0) {
2256 printk("Unable to extract new ALUA access state from"
2257 " %s\n", page);
2258 return -EINVAL;
2259 }
2260 new_state = (int)tmp;
2261
2262 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
2263 printk(KERN_ERR "Unable to process implict configfs ALUA"
2264 " transition while TPGS_IMPLICT_ALUA is diabled\n");
2265 return -EINVAL;
2266 }
2267
2268 ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
2269 NULL, NULL, new_state, 0);
2270 return (!ret) ? count : -EINVAL;
2271}
2272
2273SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
2274
2275/*
2276 * alua_access_status
2277 */
2278static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
2279 struct t10_alua_tg_pt_gp *tg_pt_gp,
2280 char *page)
2281{
2282 return sprintf(page, "%s\n",
2283 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
2284}
2285
2286static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2287 struct t10_alua_tg_pt_gp *tg_pt_gp,
2288 const char *page,
2289 size_t count)
2290{
2291 unsigned long tmp;
2292 int new_status, ret;
2293
2294 if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
2295 printk(KERN_ERR "Unable to do set ALUA access status on non"
2296 " valid tg_pt_gp ID: %hu\n",
2297 tg_pt_gp->tg_pt_gp_valid_id);
2298 return -EINVAL;
2299 }
2300
2301 ret = strict_strtoul(page, 0, &tmp);
2302 if (ret < 0) {
2303 printk(KERN_ERR "Unable to extract new ALUA access status"
2304 " from %s\n", page);
2305 return -EINVAL;
2306 }
2307 new_status = (int)tmp;
2308
2309 if ((new_status != ALUA_STATUS_NONE) &&
2310 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
2311 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
2312 printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
2313 new_status);
2314 return -EINVAL;
2315 }
2316
2317 tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
2318 return count;
2319}
2320
2321SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
2322
2323/*
2324 * alua_access_type
2325 */
2326static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
2327 struct t10_alua_tg_pt_gp *tg_pt_gp,
2328 char *page)
2329{
2330 return core_alua_show_access_type(tg_pt_gp, page);
2331}
2332
2333static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
2334 struct t10_alua_tg_pt_gp *tg_pt_gp,
2335 const char *page,
2336 size_t count)
2337{
2338 return core_alua_store_access_type(tg_pt_gp, page, count);
2339}
2340
2341SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
2342
2343/*
2344 * alua_write_metadata
2345 */
2346static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
2347 struct t10_alua_tg_pt_gp *tg_pt_gp,
2348 char *page)
2349{
2350 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
2351}
2352
2353static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
2354 struct t10_alua_tg_pt_gp *tg_pt_gp,
2355 const char *page,
2356 size_t count)
2357{
2358 unsigned long tmp;
2359 int ret;
2360
2361 ret = strict_strtoul(page, 0, &tmp);
2362 if (ret < 0) {
2363 printk(KERN_ERR "Unable to extract alua_write_metadata\n");
2364 return -EINVAL;
2365 }
2366
2367 if ((tmp != 0) && (tmp != 1)) {
2368 printk(KERN_ERR "Illegal value for alua_write_metadata:"
2369 " %lu\n", tmp);
2370 return -EINVAL;
2371 }
2372 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
2373
2374 return count;
2375}
2376
2377SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
2378
2379
2380
2381/*
2382 * nonop_delay_msecs
2383 */
2384static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
2385 struct t10_alua_tg_pt_gp *tg_pt_gp,
2386 char *page)
2387{
2388 return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
2389
2390}
2391
2392static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
2393 struct t10_alua_tg_pt_gp *tg_pt_gp,
2394 const char *page,
2395 size_t count)
2396{
2397 return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
2398}
2399
2400SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
2401
2402/*
2403 * trans_delay_msecs
2404 */
2405static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
2406 struct t10_alua_tg_pt_gp *tg_pt_gp,
2407 char *page)
2408{
2409 return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
2410}
2411
2412static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
2413 struct t10_alua_tg_pt_gp *tg_pt_gp,
2414 const char *page,
2415 size_t count)
2416{
2417 return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
2418}
2419
2420SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
2421
2422/*
2423 * preferred
2424 */
2425
2426static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
2427 struct t10_alua_tg_pt_gp *tg_pt_gp,
2428 char *page)
2429{
2430 return core_alua_show_preferred_bit(tg_pt_gp, page);
2431}
2432
2433static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
2434 struct t10_alua_tg_pt_gp *tg_pt_gp,
2435 const char *page,
2436 size_t count)
2437{
2438 return core_alua_store_preferred_bit(tg_pt_gp, page, count);
2439}
2440
2441SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
2442
2443/*
2444 * tg_pt_gp_id
2445 */
2446static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
2447 struct t10_alua_tg_pt_gp *tg_pt_gp,
2448 char *page)
2449{
2450 if (!(tg_pt_gp->tg_pt_gp_valid_id))
2451 return 0;
2452
2453 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
2454}
2455
2456static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2457 struct t10_alua_tg_pt_gp *tg_pt_gp,
2458 const char *page,
2459 size_t count)
2460{
2461 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2462 unsigned long tg_pt_gp_id;
2463 int ret;
2464
2465 ret = strict_strtoul(page, 0, &tg_pt_gp_id);
2466 if (ret < 0) {
2467 printk(KERN_ERR "strict_strtoul() returned %d for"
2468 " tg_pt_gp_id\n", ret);
2469 return -EINVAL;
2470 }
2471 if (tg_pt_gp_id > 0x0000ffff) {
2472 printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
2473 " 0x0000ffff\n", tg_pt_gp_id);
2474 return -EINVAL;
2475 }
2476
2477 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
2478 if (ret < 0)
2479 return -EINVAL;
2480
2481 printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
2482 "core/alua/tg_pt_gps/%s to ID: %hu\n",
2483 config_item_name(&alua_tg_pt_gp_cg->cg_item),
2484 tg_pt_gp->tg_pt_gp_id);
2485
2486 return count;
2487}
2488
2489SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
2490
2491/*
2492 * members
2493 */
/*
 * List every fabric/wwn/tpgt/lun port member of this target port group,
 * one path per line, bounded by PAGE_SIZE.
 *
 * NOTE(review): as in the LU group variant, cur_len counts the NUL
 * terminator and each entry is copied NUL and all, leaving embedded
 * NULs in the output — verify before changing.
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	struct se_port *port;
	struct se_portal_group *tpg;
	struct se_lun *lun;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];

	memset(buf, 0, TG_PT_GROUP_NAME_BUF);

	/* tg_pt_gp_lock guards the member list during traversal. */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
			tg_pt_gp_mem_list) {
		port = tg_pt_gp_mem->tg_pt;
		tpg = port->sep_tpg;
		lun = port->sep_lun;

		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
			"/%s\n", TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_wwn(tpg),
			TPG_TFO(tpg)->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		/* Stop (truncate) rather than overflow the configfs page. */
		if ((cur_len + len) > PAGE_SIZE) {
			printk(KERN_WARNING "Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	return len;
}

SE_DEV_ALUA_TG_PT_ATTR_RO(members);
2535
/* Generate the generic show/store dispatchers for tg_pt_gp attributes. */
CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
			tg_pt_gp_group);

/* All configfs attributes exposed under a tg_pt_gps/$NAME/ directory. */
static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_core_alua_tg_pt_gp_alua_access_state.attr,
	&target_core_alua_tg_pt_gp_alua_access_status.attr,
	&target_core_alua_tg_pt_gp_alua_access_type.attr,
	&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
	&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
	&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
	&target_core_alua_tg_pt_gp_preferred.attr,
	&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
	&target_core_alua_tg_pt_gp_members.attr,
	NULL,
};
2551
/* Item operations wiring attribute reads/writes to the EATTR dispatchers. */
static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.show_attribute		= target_core_alua_tg_pt_gp_attr_show,
	.store_attribute	= target_core_alua_tg_pt_gp_attr_store,
};

/* Item type for an individual ALUA target port group directory. */
static struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
	.ct_owner		= THIS_MODULE,
};
2562
2563/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2564
2565/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
2566
2567static struct config_group *target_core_alua_create_tg_pt_gp(
2568 struct config_group *group,
2569 const char *name)
2570{
2571 struct t10_alua *alua = container_of(group, struct t10_alua,
2572 alua_tg_pt_gps_group);
2573 struct t10_alua_tg_pt_gp *tg_pt_gp;
2574 struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
2575 struct config_group *alua_tg_pt_gp_cg = NULL;
2576 struct config_item *alua_tg_pt_gp_ci = NULL;
2577
2578 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
2579 if (!(tg_pt_gp))
2580 return NULL;
2581
2582 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2583 alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
2584
2585 config_group_init_type_name(alua_tg_pt_gp_cg, name,
2586 &target_core_alua_tg_pt_gp_cit);
2587
2588 printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
2589 " Group: alua/tg_pt_gps/%s\n",
2590 config_item_name(alua_tg_pt_gp_ci));
2591
2592 return alua_tg_pt_gp_cg;
2593}
2594
/*
 * rmdir(2) handler for alua/tg_pt_gps/$NAME: drop the configfs item
 * reference and release the ALUA target port group.
 */
static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);

	/*
	 * NOTE(review): the item reference is dropped before tg_pt_gp is
	 * freed; this assumes nothing can still reach tg_pt_gp through the
	 * item at this point -- confirm against the configfs release model.
	 */
	config_item_put(item);
	core_alua_free_tg_pt_gp(tg_pt_gp);
}
2609
/* mkdir/rmdir hooks for the alua/tg_pt_gps/ directory. */
static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group		= &target_core_alua_create_tg_pt_gp,
	.drop_item		= &target_core_alua_drop_tg_pt_gp,
};

/* Item type of the tg_pt_gps/ container group (no attributes of its own). */
static struct config_item_type target_core_alua_tg_pt_gps_cit = {
	.ct_group_ops		= &target_core_alua_tg_pt_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};
2619
2620/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
2621
2622/* Start functions for struct config_item_type target_core_alua_cit */
2623
2624/*
2625 * target_core_alua_cit is a ConfigFS group that lives under
2626 * /sys/kernel/config/target/core/alua. There are default groups
2627 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
2628 * target_core_alua_cit in target_core_init_configfs() below.
2629 */
/* Passive container type: core/alua/ only holds its default subgroups. */
static struct config_item_type target_core_alua_cit = {
	.ct_item_ops		= NULL,
	.ct_attrs		= NULL,
	.ct_owner		= THIS_MODULE,
};
2635
2636/* End functions for struct config_item_type target_core_alua_cit */
2637
2638/* Start functions for struct config_item_type target_core_hba_cit */
2639
2640static struct config_group *target_core_make_subdev(
2641 struct config_group *group,
2642 const char *name)
2643{
2644 struct t10_alua_tg_pt_gp *tg_pt_gp;
2645 struct se_subsystem_dev *se_dev;
2646 struct se_subsystem_api *t;
2647 struct config_item *hba_ci = &group->cg_item;
2648 struct se_hba *hba = item_to_hba(hba_ci);
2649 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
2650
2651 if (mutex_lock_interruptible(&hba->hba_access_mutex))
2652 return NULL;
2653
2654 /*
2655 * Locate the struct se_subsystem_api from parent's struct se_hba.
2656 */
2657 t = hba->transport;
2658
2659 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
2660 if (!se_dev) {
2661 printk(KERN_ERR "Unable to allocate memory for"
2662 " struct se_subsystem_dev\n");
2663 goto unlock;
2664 }
2665 INIT_LIST_HEAD(&se_dev->g_se_dev_list);
2666 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
2667 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
2668 INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
2669 INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
2670 spin_lock_init(&se_dev->t10_reservation.registration_lock);
2671 spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
2672 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
2673 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
2674 spin_lock_init(&se_dev->se_dev_lock);
2675 se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
2676 se_dev->t10_wwn.t10_sub_dev = se_dev;
2677 se_dev->t10_alua.t10_sub_dev = se_dev;
2678 se_dev->se_dev_attrib.da_sub_dev = se_dev;
2679
2680 se_dev->se_dev_hba = hba;
2681 dev_cg = &se_dev->se_dev_group;
2682
2683 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
2684 GFP_KERNEL);
2685 if (!(dev_cg->default_groups))
2686 goto out;
2687 /*
2688 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
2689 * for ->allocate_virtdevice()
2690 *
2691 * se_dev->se_dev_ptr will be set after ->create_virtdev()
2692 * has been called successfully in the next level up in the
2693 * configfs tree for device object's struct config_group.
2694 */
2695 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
2696 if (!(se_dev->se_dev_su_ptr)) {
2697 printk(KERN_ERR "Unable to locate subsystem dependent pointer"
2698 " from allocate_virtdevice()\n");
2699 goto out;
2700 }
2701 spin_lock(&se_global->g_device_lock);
2702 list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
2703 spin_unlock(&se_global->g_device_lock);
2704
2705 config_group_init_type_name(&se_dev->se_dev_group, name,
2706 &target_core_dev_cit);
2707 config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
2708 &target_core_dev_attrib_cit);
2709 config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
2710 &target_core_dev_pr_cit);
2711 config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
2712 &target_core_dev_wwn_cit);
2713 config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
2714 "alua", &target_core_alua_tg_pt_gps_cit);
2715 dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
2716 dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
2717 dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
2718 dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
2719 dev_cg->default_groups[4] = NULL;
2720 /*
2721 * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp
2722 */
2723 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
2724 if (!(tg_pt_gp))
2725 goto out;
2726
2727 tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
2728 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
2729 GFP_KERNEL);
2730 if (!(tg_pt_gp_cg->default_groups)) {
2731 printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
2732 "default_groups\n");
2733 goto out;
2734 }
2735
2736 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
2737 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
2738 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
2739 tg_pt_gp_cg->default_groups[1] = NULL;
2740 T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
2741
2742 printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
2743 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
2744
2745 mutex_unlock(&hba->hba_access_mutex);
2746 return &se_dev->se_dev_group;
2747out:
2748 if (T10_ALUA(se_dev)->default_tg_pt_gp) {
2749 core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
2750 T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
2751 }
2752 if (tg_pt_gp_cg)
2753 kfree(tg_pt_gp_cg->default_groups);
2754 if (dev_cg)
2755 kfree(dev_cg->default_groups);
2756 if (se_dev->se_dev_su_ptr)
2757 t->free_device(se_dev->se_dev_su_ptr);
2758 kfree(se_dev);
2759unlock:
2760 mutex_unlock(&hba->hba_access_mutex);
2761 return NULL;
2762}
2763
/*
 * rmdir(2) handler for core/$HBA/$DEV: unlink the device from the
 * global list, drop all nested default configfs groups, release the
 * backend device (virtual or subsystem-dependent) and free se_dev.
 */
static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
				struct se_subsystem_dev, se_dev_group);
	struct se_hba *hba;
	struct se_subsystem_api *t;
	struct config_item *df_item;
	struct config_group *dev_cg, *tg_pt_gp_cg;
	int i, ret;

	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);

	/*
	 * NOTE(review): if the interruptible lock fails, se_dev is freed
	 * below without any of the teardown between here and "out:" --
	 * confirm this short-circuit is intentional.
	 */
	if (mutex_lock_interruptible(&hba->hba_access_mutex))
		goto out;

	t = hba->transport;

	spin_lock(&se_global->g_device_lock);
	list_del(&se_dev->g_se_dev_list);
	spin_unlock(&se_global->g_device_lock);

	/* Drop the default_tg_pt_gp group created in target_core_make_subdev() */
	tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
		tg_pt_gp_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	kfree(tg_pt_gp_cg->default_groups);
	core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
	T10_ALUA(se_dev)->default_tg_pt_gp = NULL;

	/* Drop attrib/, pr/, wwn/ and alua/ default groups */
	dev_cg = &se_dev->se_dev_group;
	for (i = 0; dev_cg->default_groups[i]; i++) {
		df_item = &dev_cg->default_groups[i]->cg_item;
		dev_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}

	config_item_put(item);
	/*
	 * This pointer will be set when the storage is enabled with:
	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
	 */
	if (se_dev->se_dev_ptr) {
		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
			"virtual_device() for se_dev_ptr: %p\n",
				se_dev->se_dev_ptr);

		/* On failure the "Deallocating" message below is skipped. */
		ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
		if (ret < 0)
			goto hba_out;
	} else {
		/*
		 * Release struct se_subsystem_dev->se_dev_su_ptr..
		 */
		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
			"device() for se_dev_su_ptr: %p\n",
			se_dev->se_dev_su_ptr);

		t->free_device(se_dev->se_dev_su_ptr);
	}

	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
			"_dev_t: %p\n", se_dev);

hba_out:
	mutex_unlock(&hba->hba_access_mutex);
out:
	kfree(se_dev);
}
2836
/* mkdir/rmdir hooks for device directories beneath a core/$HBA/ group. */
static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};
2841
/* Declare the attribute wrapper type for struct se_hba. */
CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
/* Define a read/write HBA attribute backed by show/store helpers. */
#define SE_HBA_ATTR(_name, _mode)				\
static struct target_core_hba_attribute				\
		target_core_hba_##_name =			\
		__CONFIGFS_EATTR(_name, _mode,			\
		target_core_hba_show_attr_##_name,		\
		target_core_hba_store_attr_##_name);

/* Define a read-only HBA attribute backed by a show helper only. */
#define SE_HBA_ATTR_RO(_name)					\
static struct target_core_hba_attribute				\
		target_core_hba_##_name =			\
		__CONFIGFS_EATTR_RO(_name,			\
		target_core_hba_show_attr_##_name);
2855
2856static ssize_t target_core_hba_show_attr_hba_info(
2857 struct se_hba *hba,
2858 char *page)
2859{
2860 return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
2861 hba->hba_id, hba->transport->name,
2862 TARGET_CORE_CONFIGFS_VERSION);
2863}
2864
2865SE_HBA_ATTR_RO(hba_info);
2866
2867static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
2868 char *page)
2869{
2870 int hba_mode = 0;
2871
2872 if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
2873 hba_mode = 1;
2874
2875 return sprintf(page, "%d\n", hba_mode);
2876}
2877
/*
 * configfs "hba_mode" store hook: toggle passthrough (pSCSI) mode via
 * the backend's pmode_enable_hba() callback.  Refused while the HBA
 * still has active devices, or when the backend has no such callback.
 */
static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
				const char *page, size_t count)
{
	struct se_subsystem_api *transport = hba->transport;
	unsigned long mode_flag;
	int ret;

	if (transport->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = strict_strtoul(page, 0, &mode_flag);
	if (ret < 0) {
		printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
		return -EINVAL;
	}

	/*
	 * NOTE(review): the device list is only checked here and the lock
	 * is dropped before pmode_enable_hba() runs, so a device could be
	 * added in between -- confirm callers serialize this externally.
	 */
	spin_lock(&hba->device_lock);
	if (!(list_empty(&hba->hba_dev_list))) {
		printk(KERN_ERR "Unable to set hba_mode with active devices\n");
		spin_unlock(&hba->device_lock);
		return -EINVAL;
	}
	spin_unlock(&hba->device_lock);

	/* Callback returns >0 to enable the flag, 0 to clear it. */
	ret = transport->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}

SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
2914
/* Generate the generic show/store dispatchers for HBA attributes. */
CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);

/* Attributes exposed in every core/$HBA/ directory. */
static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_core_hba_hba_info.attr,
	&target_core_hba_hba_mode.attr,
	NULL,
};
2922
/* Item operations wiring HBA attribute I/O to the EATTR dispatchers. */
static struct configfs_item_operations target_core_hba_item_ops = {
	.show_attribute		= target_core_hba_attr_show,
	.store_attribute	= target_core_hba_attr_store,
};

/* Item type for a core/$HBA/ directory: attrs + device mkdir/rmdir. */
static struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};
2934
2935static struct config_group *target_core_call_addhbatotarget(
2936 struct config_group *group,
2937 const char *name)
2938{
2939 char *se_plugin_str, *str, *str2;
2940 struct se_hba *hba;
2941 char buf[TARGET_CORE_NAME_MAX_LEN];
2942 unsigned long plugin_dep_id = 0;
2943 int ret;
2944
2945 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
2946 if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
2947 printk(KERN_ERR "Passed *name strlen(): %d exceeds"
2948 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
2949 TARGET_CORE_NAME_MAX_LEN);
2950 return ERR_PTR(-ENAMETOOLONG);
2951 }
2952 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
2953
2954 str = strstr(buf, "_");
2955 if (!(str)) {
2956 printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
2957 return ERR_PTR(-EINVAL);
2958 }
2959 se_plugin_str = buf;
2960 /*
2961 * Special case for subsystem plugins that have "_" in their names.
2962 * Namely rd_direct and rd_mcp..
2963 */
2964 str2 = strstr(str+1, "_");
2965 if ((str2)) {
2966 *str2 = '\0'; /* Terminate for *se_plugin_str */
2967 str2++; /* Skip to start of plugin dependent ID */
2968 str = str2;
2969 } else {
2970 *str = '\0'; /* Terminate for *se_plugin_str */
2971 str++; /* Skip to start of plugin dependent ID */
2972 }
2973
2974 ret = strict_strtoul(str, 0, &plugin_dep_id);
2975 if (ret < 0) {
2976 printk(KERN_ERR "strict_strtoul() returned %d for"
2977 " plugin_dep_id\n", ret);
2978 return ERR_PTR(-EINVAL);
2979 }
2980 /*
2981 * Load up TCM subsystem plugins if they have not already been loaded.
2982 */
2983 if (transport_subsystem_check_init() < 0)
2984 return ERR_PTR(-EINVAL);
2985
2986 hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
2987 if (IS_ERR(hba))
2988 return ERR_CAST(hba);
2989
2990 config_group_init_type_name(&hba->hba_group, name,
2991 &target_core_hba_cit);
2992
2993 return &hba->hba_group;
2994}
2995
/*
 * rmdir(2) handler for target/core/$HBA: drop the configfs reference
 * and tear the HBA down.
 */
static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	struct se_hba *hba = item_to_hba(item);

	config_item_put(item);
	core_delete_hba(hba);
}
3005
/* mkdir/rmdir hooks for HBA directories under target/core/. */
static struct configfs_group_operations target_core_group_ops = {
	.make_group	= target_core_call_addhbatotarget,
	.drop_item	= target_core_call_delhbafromtarget,
};

/* Item type of the target/core/ group itself (no attributes). */
static struct config_item_type target_core_cit = {
	.ct_item_ops	= NULL,
	.ct_group_ops	= &target_core_group_ops,
	.ct_attrs	= NULL,
	.ct_owner	= THIS_MODULE,
};
3017
3018/* Stop functions for struct config_item_type target_core_hba_cit */
3019
3020static int target_core_init_configfs(void)
3021{
3022 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
3023 struct config_group *lu_gp_cg = NULL;
3024 struct configfs_subsystem *subsys;
3025 struct proc_dir_entry *scsi_target_proc = NULL;
3026 struct t10_alua_lu_gp *lu_gp;
3027 int ret;
3028
3029 printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
3030 " Engine: %s on %s/%s on "UTS_RELEASE"\n",
3031 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
3032
3033 subsys = target_core_subsystem[0];
3034 config_group_init(&subsys->su_group);
3035 mutex_init(&subsys->su_mutex);
3036
3037 INIT_LIST_HEAD(&g_tf_list);
3038 mutex_init(&g_tf_lock);
3039 init_scsi_index_table();
3040 ret = init_se_global();
3041 if (ret < 0)
3042 return -1;
3043 /*
3044 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
3045 * and ALUA Logical Unit Group and Target Port Group infrastructure.
3046 */
3047 target_cg = &subsys->su_group;
3048 target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3049 GFP_KERNEL);
3050 if (!(target_cg->default_groups)) {
3051 printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
3052 goto out_global;
3053 }
3054
3055 config_group_init_type_name(&se_global->target_core_hbagroup,
3056 "core", &target_core_cit);
3057 target_cg->default_groups[0] = &se_global->target_core_hbagroup;
3058 target_cg->default_groups[1] = NULL;
3059 /*
3060 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
3061 */
3062 hba_cg = &se_global->target_core_hbagroup;
3063 hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3064 GFP_KERNEL);
3065 if (!(hba_cg->default_groups)) {
3066 printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
3067 goto out_global;
3068 }
3069 config_group_init_type_name(&se_global->alua_group,
3070 "alua", &target_core_alua_cit);
3071 hba_cg->default_groups[0] = &se_global->alua_group;
3072 hba_cg->default_groups[1] = NULL;
3073 /*
3074 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
3075 * groups under /sys/kernel/config/target/core/alua/
3076 */
3077 alua_cg = &se_global->alua_group;
3078 alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3079 GFP_KERNEL);
3080 if (!(alua_cg->default_groups)) {
3081 printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
3082 goto out_global;
3083 }
3084
3085 config_group_init_type_name(&se_global->alua_lu_gps_group,
3086 "lu_gps", &target_core_alua_lu_gps_cit);
3087 alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
3088 alua_cg->default_groups[1] = NULL;
3089 /*
3090 * Add core/alua/lu_gps/default_lu_gp
3091 */
3092 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
3093 if (IS_ERR(lu_gp))
3094 goto out_global;
3095
3096 lu_gp_cg = &se_global->alua_lu_gps_group;
3097 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
3098 GFP_KERNEL);
3099 if (!(lu_gp_cg->default_groups)) {
3100 printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
3101 goto out_global;
3102 }
3103
3104 config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
3105 &target_core_alua_lu_gp_cit);
3106 lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
3107 lu_gp_cg->default_groups[1] = NULL;
3108 se_global->default_lu_gp = lu_gp;
3109 /*
3110 * Register the target_core_mod subsystem with configfs.
3111 */
3112 ret = configfs_register_subsystem(subsys);
3113 if (ret < 0) {
3114 printk(KERN_ERR "Error %d while registering subsystem %s\n",
3115 ret, subsys->su_group.cg_item.ci_namebuf);
3116 goto out_global;
3117 }
3118 printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
3119 " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
3120 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
3121 /*
3122 * Register built-in RAMDISK subsystem logic for virtual LUN 0
3123 */
3124 ret = rd_module_init();
3125 if (ret < 0)
3126 goto out;
3127
3128 if (core_dev_setup_virtual_lun0() < 0)
3129 goto out;
3130
3131 scsi_target_proc = proc_mkdir("scsi_target", 0);
3132 if (!(scsi_target_proc)) {
3133 printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
3134 goto out;
3135 }
3136 ret = init_scsi_target_mib();
3137 if (ret < 0)
3138 goto out;
3139
3140 return 0;
3141
3142out:
3143 configfs_unregister_subsystem(subsys);
3144 if (scsi_target_proc)
3145 remove_proc_entry("scsi_target", 0);
3146 core_dev_release_virtual_lun0();
3147 rd_module_exit();
3148out_global:
3149 if (se_global->default_lu_gp) {
3150 core_alua_free_lu_gp(se_global->default_lu_gp);
3151 se_global->default_lu_gp = NULL;
3152 }
3153 if (lu_gp_cg)
3154 kfree(lu_gp_cg->default_groups);
3155 if (alua_cg)
3156 kfree(alua_cg->default_groups);
3157 if (hba_cg)
3158 kfree(hba_cg->default_groups);
3159 kfree(target_cg->default_groups);
3160 release_se_global();
3161 return -1;
3162}
3163
3164static void target_core_exit_configfs(void)
3165{
3166 struct configfs_subsystem *subsys;
3167 struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
3168 struct config_item *item;
3169 int i;
3170
3171 se_global->in_shutdown = 1;
3172 subsys = target_core_subsystem[0];
3173
3174 lu_gp_cg = &se_global->alua_lu_gps_group;
3175 for (i = 0; lu_gp_cg->default_groups[i]; i++) {
3176 item = &lu_gp_cg->default_groups[i]->cg_item;
3177 lu_gp_cg->default_groups[i] = NULL;
3178 config_item_put(item);
3179 }
3180 kfree(lu_gp_cg->default_groups);
3181 core_alua_free_lu_gp(se_global->default_lu_gp);
3182 se_global->default_lu_gp = NULL;
3183
3184 alua_cg = &se_global->alua_group;
3185 for (i = 0; alua_cg->default_groups[i]; i++) {
3186 item = &alua_cg->default_groups[i]->cg_item;
3187 alua_cg->default_groups[i] = NULL;
3188 config_item_put(item);
3189 }
3190 kfree(alua_cg->default_groups);
3191
3192 hba_cg = &se_global->target_core_hbagroup;
3193 for (i = 0; hba_cg->default_groups[i]; i++) {
3194 item = &hba_cg->default_groups[i]->cg_item;
3195 hba_cg->default_groups[i] = NULL;
3196 config_item_put(item);
3197 }
3198 kfree(hba_cg->default_groups);
3199
3200 for (i = 0; subsys->su_group.default_groups[i]; i++) {
3201 item = &subsys->su_group.default_groups[i]->cg_item;
3202 subsys->su_group.default_groups[i] = NULL;
3203 config_item_put(item);
3204 }
3205 kfree(subsys->su_group.default_groups);
3206
3207 configfs_unregister_subsystem(subsys);
3208 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
3209 " Infrastructure\n");
3210
3211 remove_scsi_target_mib();
3212 remove_proc_entry("scsi_target", 0);
3213 core_dev_release_virtual_lun0();
3214 rd_module_exit();
3215 release_se_global();
3216
3217 return;
3218}
3219
/* Module metadata. */
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

/* configfs setup/teardown double as the module entry/exit points. */
module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
new file mode 100644
index 000000000000..317ce58d426d
--- /dev/null
+++ b/drivers/target/target_core_device.c
@@ -0,0 +1,1694 @@
1/*******************************************************************************
2 * Filename: target_core_device.c (based on iscsi_target_device.c)
3 *
4 * This file contains the iSCSI Virtual Device and Disk Transport
5 * agnostic related functions.
6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 *
12 * Nicholas A. Bellinger <nab@kernel.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 *
28 ******************************************************************************/
29
30#include <linux/net.h>
31#include <linux/string.h>
32#include <linux/delay.h>
33#include <linux/timer.h>
34#include <linux/slab.h>
35#include <linux/spinlock.h>
36#include <linux/smp_lock.h>
37#include <linux/kthread.h>
38#include <linux/in.h>
39#include <net/sock.h>
40#include <net/tcp.h>
41#include <scsi/scsi.h>
42
43#include <target/target_core_base.h>
44#include <target/target_core_device.h>
45#include <target/target_core_tpg.h>
46#include <target/target_core_transport.h>
47#include <target/target_core_fabric_ops.h>
48
49#include "target_core_alua.h"
50#include "target_core_hba.h"
51#include "target_core_pr.h"
52#include "target_core_ua.h"
53
/* Device start/stop lifecycle helpers, defined later in this file. */
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);
56
57int transport_get_lun_for_cmd(
58 struct se_cmd *se_cmd,
59 unsigned char *cdb,
60 u32 unpacked_lun)
61{
62 struct se_dev_entry *deve;
63 struct se_lun *se_lun = NULL;
64 struct se_session *se_sess = SE_SESS(se_cmd);
65 unsigned long flags;
66 int read_only = 0;
67
68 spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
69 deve = se_cmd->se_deve =
70 &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
71 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
72 if (se_cmd) {
73 deve->total_cmds++;
74 deve->total_bytes += se_cmd->data_length;
75
76 if (se_cmd->data_direction == DMA_TO_DEVICE) {
77 if (deve->lun_flags &
78 TRANSPORT_LUNFLAGS_READ_ONLY) {
79 read_only = 1;
80 goto out;
81 }
82 deve->write_bytes += se_cmd->data_length;
83 } else if (se_cmd->data_direction ==
84 DMA_FROM_DEVICE) {
85 deve->read_bytes += se_cmd->data_length;
86 }
87 }
88 deve->deve_cmds++;
89
90 se_lun = se_cmd->se_lun = deve->se_lun;
91 se_cmd->pr_res_key = deve->pr_res_key;
92 se_cmd->orig_fe_lun = unpacked_lun;
93 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
94 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
95 }
96out:
97 spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
98
99 if (!se_lun) {
100 if (read_only) {
101 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
102 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
103 printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
104 " Access for 0x%08x\n",
105 CMD_TFO(se_cmd)->get_fabric_name(),
106 unpacked_lun);
107 return -1;
108 } else {
109 /*
110 * Use the se_portal_group->tpg_virt_lun0 to allow for
111 * REPORT_LUNS, et al to be returned when no active
112 * MappedLUN=0 exists for this Initiator Port.
113 */
114 if (unpacked_lun != 0) {
115 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
116 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
117 printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
118 " Access for 0x%08x\n",
119 CMD_TFO(se_cmd)->get_fabric_name(),
120 unpacked_lun);
121 return -1;
122 }
123 /*
124 * Force WRITE PROTECT for virtual LUN 0
125 */
126 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
127 (se_cmd->data_direction != DMA_NONE)) {
128 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
129 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
130 return -1;
131 }
132#if 0
133 printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
134 CMD_TFO(se_cmd)->get_fabric_name());
135#endif
136 se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
137 se_cmd->orig_fe_lun = 0;
138 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
139 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
140 }
141 }
142 /*
143 * Determine if the struct se_lun is online.
144 */
145/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
146 if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
147 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
148 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
149 return -1;
150 }
151
152 {
153 struct se_device *dev = se_lun->lun_se_dev;
154 spin_lock(&dev->stats_lock);
155 dev->num_cmds++;
156 if (se_cmd->data_direction == DMA_TO_DEVICE)
157 dev->write_bytes += se_cmd->data_length;
158 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
159 dev->read_bytes += se_cmd->data_length;
160 spin_unlock(&dev->stats_lock);
161 }
162
163 /*
164 * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used
165 * for tracking state of struct se_cmds during LUN shutdown events.
166 */
167 spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
168 list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
169 atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
170#if 0
171 printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
172 CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
173#endif
174 spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
175
176 return 0;
177}
178EXPORT_SYMBOL(transport_get_lun_for_cmd);
179
/*
 * Resolve @unpacked_lun for a task management request: record the LUN
 * and device on the se_tmr_req and queue it on the device's TMR list.
 *
 * Returns 0 on success, -1 (with SCF_SCSI_CDB_EXCEPTION set) when the
 * initiator has no access to the LUN or the backing device is offline.
 */
int transport_get_lun_for_tmr(
	struct se_cmd *se_cmd,
	u32 unpacked_lun)
{
	struct se_device *dev = NULL;
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = SE_SESS(se_cmd);
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	deve = se_cmd->se_deve =
			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
		dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
	}
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	if (!se_lun) {
		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			CMD_TFO(se_cmd)->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}
	/*
	 * Determine if the struct se_lun is online.
	 */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}

	/* dev is non-NULL here: it was set together with se_lun above. */
	spin_lock(&dev->se_tmr_lock);
	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
	spin_unlock(&dev->se_tmr_lock);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_tmr);
227
228/*
229 * This function is called from core_scsi3_emulate_pro_register_and_move()
230 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
231 * when a matching rtpi is found.
232 */
233struct se_dev_entry *core_get_se_deve_from_rtpi(
234 struct se_node_acl *nacl,
235 u16 rtpi)
236{
237 struct se_dev_entry *deve;
238 struct se_lun *lun;
239 struct se_port *port;
240 struct se_portal_group *tpg = nacl->se_tpg;
241 u32 i;
242
243 spin_lock_irq(&nacl->device_list_lock);
244 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
245 deve = &nacl->device_list[i];
246
247 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
248 continue;
249
250 lun = deve->se_lun;
251 if (!(lun)) {
252 printk(KERN_ERR "%s device entries device pointer is"
253 " NULL, but Initiator has access.\n",
254 TPG_TFO(tpg)->get_fabric_name());
255 continue;
256 }
257 port = lun->lun_sep;
258 if (!(port)) {
259 printk(KERN_ERR "%s device entries device pointer is"
260 " NULL, but Initiator has access.\n",
261 TPG_TFO(tpg)->get_fabric_name());
262 continue;
263 }
264 if (port->sep_rtpi != rtpi)
265 continue;
266
267 atomic_inc(&deve->pr_ref_count);
268 smp_mb__after_atomic_inc();
269 spin_unlock_irq(&nacl->device_list_lock);
270
271 return deve;
272 }
273 spin_unlock_irq(&nacl->device_list_lock);
274
275 return NULL;
276}
277
/*
 * Tear down every active struct se_dev_entry mapping for a node ACL and
 * free the device_list array itself.  Called during ACL shutdown; always
 * returns 0.
 */
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	/* Nothing to release when no device_list was ever allocated. */
	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		/*
		 * core_update_device_list_for_node() takes device_list_lock
		 * itself (and busy-waits on deve->pr_ref_count), so the lock
		 * is dropped around the call and re-taken afterwards.
		 */
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}
316
317void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
318{
319 struct se_dev_entry *deve;
320
321 spin_lock_irq(&se_nacl->device_list_lock);
322 deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
323 deve->deve_cmds--;
324 spin_unlock_irq(&se_nacl->device_list_lock);
325
326 return;
327}
328
329void core_update_device_list_access(
330 u32 mapped_lun,
331 u32 lun_access,
332 struct se_node_acl *nacl)
333{
334 struct se_dev_entry *deve;
335
336 spin_lock_irq(&nacl->device_list_lock);
337 deve = &nacl->device_list[mapped_lun];
338 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
339 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
340 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
341 } else {
342 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
343 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
344 }
345 spin_unlock_irq(&nacl->device_list_lock);
346
347 return;
348}
349
350/* core_update_device_list_for_node():
351 *
352 *
353 */
354int core_update_device_list_for_node(
355 struct se_lun *lun,
356 struct se_lun_acl *lun_acl,
357 u32 mapped_lun,
358 u32 lun_access,
359 struct se_node_acl *nacl,
360 struct se_portal_group *tpg,
361 int enable)
362{
363 struct se_port *port = lun->lun_sep;
364 struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
365 int trans = 0;
366 /*
367 * If the MappedLUN entry is being disabled, the entry in
368 * port->sep_alua_list must be removed now before clearing the
369 * struct se_dev_entry pointers below as logic in
370 * core_alua_do_transition_tg_pt() depends on these being present.
371 */
372 if (!(enable)) {
373 /*
374 * deve->se_lun_acl will be NULL for demo-mode created LUNs
375 * that have not been explictly concerted to MappedLUNs ->
376 * struct se_lun_acl.
377 */
378 if (!(deve->se_lun_acl))
379 return 0;
380
381 spin_lock_bh(&port->sep_alua_lock);
382 list_del(&deve->alua_port_list);
383 spin_unlock_bh(&port->sep_alua_lock);
384 }
385
386 spin_lock_irq(&nacl->device_list_lock);
387 if (enable) {
388 /*
389 * Check if the call is handling demo mode -> explict LUN ACL
390 * transition. This transition must be for the same struct se_lun
391 * + mapped_lun that was setup in demo mode..
392 */
393 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
394 if (deve->se_lun_acl != NULL) {
395 printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
396 " already set for demo mode -> explict"
397 " LUN ACL transition\n");
398 return -1;
399 }
400 if (deve->se_lun != lun) {
401 printk(KERN_ERR "struct se_dev_entry->se_lun does"
402 " match passed struct se_lun for demo mode"
403 " -> explict LUN ACL transition\n");
404 return -1;
405 }
406 deve->se_lun_acl = lun_acl;
407 trans = 1;
408 } else {
409 deve->se_lun = lun;
410 deve->se_lun_acl = lun_acl;
411 deve->mapped_lun = mapped_lun;
412 deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
413 }
414
415 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
416 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
417 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
418 } else {
419 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
420 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
421 }
422
423 if (trans) {
424 spin_unlock_irq(&nacl->device_list_lock);
425 return 0;
426 }
427 deve->creation_time = get_jiffies_64();
428 deve->attach_count++;
429 spin_unlock_irq(&nacl->device_list_lock);
430
431 spin_lock_bh(&port->sep_alua_lock);
432 list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
433 spin_unlock_bh(&port->sep_alua_lock);
434
435 return 0;
436 }
437 /*
438 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
439 * PR operation to complete.
440 */
441 spin_unlock_irq(&nacl->device_list_lock);
442 while (atomic_read(&deve->pr_ref_count) != 0)
443 cpu_relax();
444 spin_lock_irq(&nacl->device_list_lock);
445 /*
446 * Disable struct se_dev_entry LUN ACL mapping
447 */
448 core_scsi3_ua_release_all(deve);
449 deve->se_lun = NULL;
450 deve->se_lun_acl = NULL;
451 deve->lun_flags = 0;
452 deve->creation_time = 0;
453 deve->attach_count--;
454 spin_unlock_irq(&nacl->device_list_lock);
455
456 core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
457 return 0;
458}
459
/* core_clear_lun_from_tpg():
 *
 * Walk every node ACL in the TPG and drop any struct se_dev_entry
 * mapping that references the passed struct se_lun.  Used when a LUN
 * is removed from the TPG.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		/*
		 * NOTE(review): acl_node_lock is dropped while this nacl is
		 * processed and re-taken before advancing the iterator; this
		 * appears to rely on ACLs not being removed concurrently --
		 * confirm against the ACL teardown path.
		 */
		spin_unlock_bh(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			/*
			 * Dropped around the call because
			 * core_update_device_list_for_node() takes
			 * device_list_lock itself.
			 */
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	return;
}
495
496static struct se_port *core_alloc_port(struct se_device *dev)
497{
498 struct se_port *port, *port_tmp;
499
500 port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
501 if (!(port)) {
502 printk(KERN_ERR "Unable to allocate struct se_port\n");
503 return NULL;
504 }
505 INIT_LIST_HEAD(&port->sep_alua_list);
506 INIT_LIST_HEAD(&port->sep_list);
507 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
508 spin_lock_init(&port->sep_alua_lock);
509 mutex_init(&port->sep_tg_pt_md_mutex);
510
511 spin_lock(&dev->se_port_lock);
512 if (dev->dev_port_count == 0x0000ffff) {
513 printk(KERN_WARNING "Reached dev->dev_port_count =="
514 " 0x0000ffff\n");
515 spin_unlock(&dev->se_port_lock);
516 return NULL;
517 }
518again:
519 /*
520 * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device
521 * Here is the table from spc4r17 section 7.7.3.8.
522 *
523 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
524 *
525 * Code Description
526 * 0h Reserved
527 * 1h Relative port 1, historically known as port A
528 * 2h Relative port 2, historically known as port B
529 * 3h to FFFFh Relative port 3 through 65 535
530 */
531 port->sep_rtpi = dev->dev_rpti_counter++;
532 if (!(port->sep_rtpi))
533 goto again;
534
535 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
536 /*
537 * Make sure RELATIVE TARGET PORT IDENTIFER is unique
538 * for 16-bit wrap..
539 */
540 if (port->sep_rtpi == port_tmp->sep_rtpi)
541 goto again;
542 }
543 spin_unlock(&dev->se_port_lock);
544
545 return port;
546}
547
/*
 * Publish an allocated struct se_port: link it to the tpg and lun, add
 * it to the device's dev_sep_list, and (for SPC3 ALUA emulation) attach
 * it to the default ALUA target port group.
 */
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = SU_DEV(dev);
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	/* Both locks held so port<->lun linkage and list insert are atomic. */
	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		/*
		 * NOTE(review): this early return skips the dev_port_count
		 * increment and sep_index assignment below -- confirm that is
		 * the intended failure behavior.
		 */
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			T10_ALUA(su_dev)->default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */
}
586
587/*
588 * Called with struct se_device->se_port_lock spinlock held.
589 */
590static void core_release_port(struct se_device *dev, struct se_port *port)
591{
592 /*
593 * Wait for any port reference for PR ALL_TG_PT=1 operation
594 * to complete in __core_scsi3_alloc_registration()
595 */
596 spin_unlock(&dev->se_port_lock);
597 if (atomic_read(&port->sep_tg_pt_ref_cnt))
598 cpu_relax();
599 spin_lock(&dev->se_port_lock);
600
601 core_alua_free_tg_pt_gp_mem(port);
602
603 list_del(&port->sep_list);
604 dev->dev_port_count--;
605 kfree(port);
606
607 return;
608}
609
610int core_dev_export(
611 struct se_device *dev,
612 struct se_portal_group *tpg,
613 struct se_lun *lun)
614{
615 struct se_port *port;
616
617 port = core_alloc_port(dev);
618 if (!(port))
619 return -1;
620
621 lun->lun_se_dev = dev;
622 se_dev_start(dev);
623
624 atomic_inc(&dev->dev_export_obj.obj_access_count);
625 core_export_port(dev, tpg, port, lun);
626 return 0;
627}
628
/*
 * Reverse of core_dev_export(): release the port, drop the export
 * reference count, stop the device, and detach it from the lun.
 * A lun that was never exported (lun_se_dev == NULL) is a no-op.
 */
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	/* core_release_port() expects se_port_lock held on entry. */
	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}
651
652int transport_core_report_lun_response(struct se_cmd *se_cmd)
653{
654 struct se_dev_entry *deve;
655 struct se_lun *se_lun;
656 struct se_session *se_sess = SE_SESS(se_cmd);
657 struct se_task *se_task;
658 unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
659 u32 cdb_offset = 0, lun_count = 0, offset = 8;
660 u64 i, lun;
661
662 list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
663 break;
664
665 if (!(se_task)) {
666 printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
667 return PYX_TRANSPORT_LU_COMM_FAILURE;
668 }
669
670 /*
671 * If no struct se_session pointer is present, this struct se_cmd is
672 * coming via a target_core_mod PASSTHROUGH op, and not through
673 * a $FABRIC_MOD. In that case, report LUN=0 only.
674 */
675 if (!(se_sess)) {
676 lun = 0;
677 buf[offset++] = ((lun >> 56) & 0xff);
678 buf[offset++] = ((lun >> 48) & 0xff);
679 buf[offset++] = ((lun >> 40) & 0xff);
680 buf[offset++] = ((lun >> 32) & 0xff);
681 buf[offset++] = ((lun >> 24) & 0xff);
682 buf[offset++] = ((lun >> 16) & 0xff);
683 buf[offset++] = ((lun >> 8) & 0xff);
684 buf[offset++] = (lun & 0xff);
685 lun_count = 1;
686 goto done;
687 }
688
689 spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
690 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
691 deve = &SE_NODE_ACL(se_sess)->device_list[i];
692 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
693 continue;
694 se_lun = deve->se_lun;
695 /*
696 * We determine the correct LUN LIST LENGTH even once we
697 * have reached the initial allocation length.
698 * See SPC2-R20 7.19.
699 */
700 lun_count++;
701 if ((cdb_offset + 8) >= se_cmd->data_length)
702 continue;
703
704 lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
705 buf[offset++] = ((lun >> 56) & 0xff);
706 buf[offset++] = ((lun >> 48) & 0xff);
707 buf[offset++] = ((lun >> 40) & 0xff);
708 buf[offset++] = ((lun >> 32) & 0xff);
709 buf[offset++] = ((lun >> 24) & 0xff);
710 buf[offset++] = ((lun >> 16) & 0xff);
711 buf[offset++] = ((lun >> 8) & 0xff);
712 buf[offset++] = (lun & 0xff);
713 cdb_offset += 8;
714 }
715 spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
716
717 /*
718 * See SPC3 r07, page 159.
719 */
720done:
721 lun_count *= 8;
722 buf[0] = ((lun_count >> 24) & 0xff);
723 buf[1] = ((lun_count >> 16) & 0xff);
724 buf[2] = ((lun_count >> 8) & 0xff);
725 buf[3] = (lun_count & 0xff);
726
727 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
728}
729
/* se_release_device_for_hba():
 *
 * Final teardown of a struct se_device: stop it if still in any active
 * or deactivated state, shut down the backend, unlink it from the HBA's
 * device list, release PR registrations and VPD data, then free it.
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	/* Any of these states means se_dev_start() was balanced by us. */
	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	/* dev_ptr is the backend's private device; stop its kthread first. */
	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev->dev_status_queue_obj);
	kfree(dev->dev_queue_obj);
	kfree(dev);

	return;
}
765
766void se_release_vpd_for_dev(struct se_device *dev)
767{
768 struct t10_vpd *vpd, *vpd_tmp;
769
770 spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
771 list_for_each_entry_safe(vpd, vpd_tmp,
772 &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
773 list_del(&vpd->vpd_list);
774 kfree(vpd);
775 }
776 spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
777
778 return;
779}
780
/*
 * Called with struct se_hba->device_lock held.
 *
 * Remove every exported LUN for this device by walking dev_sep_list and
 * calling core_dev_del_lun() on each active port's lun.
 */
void se_clear_dev_ports(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	struct se_lun *lun;
	struct se_portal_group *tpg;
	struct se_port *sep, *sep_tmp;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * NOTE(review): both locks (including the caller-held
		 * hba->device_lock) are dropped while core_dev_del_lun()
		 * runs, and sep/sep_tmp are not revalidated afterwards --
		 * this relies on no concurrent mutation of dev_sep_list;
		 * confirm against the configfs removal paths.
		 */
		spin_unlock(&dev->se_port_lock);
		spin_unlock(&hba->device_lock);

		lun = sep->sep_lun;
		tpg = sep->sep_tpg;
		/* Skip ports whose lun was already unexported. */
		spin_lock(&lun->lun_sep_lock);
		if (lun->lun_se_dev == NULL) {
			spin_unlock(&lun->lun_sep_lock);
			continue;
		}
		spin_unlock(&lun->lun_sep_lock);

		core_dev_del_lun(tpg, lun->unpacked_lun);

		/* Re-take in the caller's original order. */
		spin_lock(&hba->device_lock);
		spin_lock(&dev->se_port_lock);
	}
	spin_unlock(&dev->se_port_lock);

	return;
}
814
/* se_free_virtual_device():
 *
 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 *
 * Clears all exported ports (se_clear_dev_ports() expects
 * hba->device_lock held), releases ALUA LU group membership, and hands
 * the device to se_release_device_for_hba() for final teardown.
 * Always returns 0.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	spin_lock(&hba->device_lock);
	se_clear_dev_ports(dev);
	spin_unlock(&hba->device_lock);

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}
830
/*
 * Take a device usage reference.  On the first reference (count 1),
 * promote the device's status: DEACTIVATED -> ACTIVATED, or
 * OFFLINE_DEACTIVATED -> OFFLINE_ACTIVATED.  Paired with se_dev_stop().
 */
static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}
850
/*
 * Drop a device usage reference.  On the last reference (count 0),
 * demote the status: ACTIVATED -> DEACTIVATED, or OFFLINE_ACTIVATED ->
 * OFFLINE_DEACTIVATED.  Then busy-waits for any in-flight MIB access
 * on the HBA to drain before returning.
 */
static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);

	while (atomic_read(&hba->dev_mib_access_count))
		cpu_relax();
}
872
873int se_dev_check_online(struct se_device *dev)
874{
875 int ret;
876
877 spin_lock_irq(&dev->dev_status_lock);
878 ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
879 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
880 spin_unlock_irq(&dev->dev_status_lock);
881
882 return ret;
883}
884
885int se_dev_check_shutdown(struct se_device *dev)
886{
887 int ret;
888
889 spin_lock_irq(&dev->dev_status_lock);
890 ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
891 spin_unlock_irq(&dev->dev_status_lock);
892
893 return ret;
894}
895
/*
 * Initialize every se_dev_attrib field for a new device from the
 * compile-time DA_* defaults and the backend-supplied queue limits.
 */
void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	/* SCSI emulation feature defaults. */
	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	DEV_ATTRIB(dev)->unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
}
945
946int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
947{
948 if (task_timeout > DA_TASK_TIMEOUT_MAX) {
949 printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
950 " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
951 return -1;
952 } else {
953 DEV_ATTRIB(dev)->task_timeout = task_timeout;
954 printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
955 dev, task_timeout);
956 }
957
958 return 0;
959}
960
961int se_dev_set_max_unmap_lba_count(
962 struct se_device *dev,
963 u32 max_unmap_lba_count)
964{
965 DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
966 printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
967 dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
968 return 0;
969}
970
971int se_dev_set_max_unmap_block_desc_count(
972 struct se_device *dev,
973 u32 max_unmap_block_desc_count)
974{
975 DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
976 printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
977 dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
978 return 0;
979}
980
981int se_dev_set_unmap_granularity(
982 struct se_device *dev,
983 u32 unmap_granularity)
984{
985 DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
986 printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
987 dev, DEV_ATTRIB(dev)->unmap_granularity);
988 return 0;
989}
990
991int se_dev_set_unmap_granularity_alignment(
992 struct se_device *dev,
993 u32 unmap_granularity_alignment)
994{
995 DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
996 printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
997 dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
998 return 0;
999}
1000
1001int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
1002{
1003 if ((flag != 0) && (flag != 1)) {
1004 printk(KERN_ERR "Illegal value %d\n", flag);
1005 return -1;
1006 }
1007 if (TRANSPORT(dev)->dpo_emulated == NULL) {
1008 printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
1009 return -1;
1010 }
1011 if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
1012 printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
1013 return -1;
1014 }
1015 DEV_ATTRIB(dev)->emulate_dpo = flag;
1016 printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
1017 " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
1018 return 0;
1019}
1020
1021int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
1022{
1023 if ((flag != 0) && (flag != 1)) {
1024 printk(KERN_ERR "Illegal value %d\n", flag);
1025 return -1;
1026 }
1027 if (TRANSPORT(dev)->fua_write_emulated == NULL) {
1028 printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
1029 return -1;
1030 }
1031 if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
1032 printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
1033 return -1;
1034 }
1035 DEV_ATTRIB(dev)->emulate_fua_write = flag;
1036 printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
1037 dev, DEV_ATTRIB(dev)->emulate_fua_write);
1038 return 0;
1039}
1040
1041int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
1042{
1043 if ((flag != 0) && (flag != 1)) {
1044 printk(KERN_ERR "Illegal value %d\n", flag);
1045 return -1;
1046 }
1047 if (TRANSPORT(dev)->fua_read_emulated == NULL) {
1048 printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
1049 return -1;
1050 }
1051 if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
1052 printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
1053 return -1;
1054 }
1055 DEV_ATTRIB(dev)->emulate_fua_read = flag;
1056 printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
1057 dev, DEV_ATTRIB(dev)->emulate_fua_read);
1058 return 0;
1059}
1060
1061int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
1062{
1063 if ((flag != 0) && (flag != 1)) {
1064 printk(KERN_ERR "Illegal value %d\n", flag);
1065 return -1;
1066 }
1067 if (TRANSPORT(dev)->write_cache_emulated == NULL) {
1068 printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
1069 return -1;
1070 }
1071 if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
1072 printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
1073 return -1;
1074 }
1075 DEV_ATTRIB(dev)->emulate_write_cache = flag;
1076 printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1077 dev, DEV_ATTRIB(dev)->emulate_write_cache);
1078 return 0;
1079}
1080
1081int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1082{
1083 if ((flag != 0) && (flag != 1) && (flag != 2)) {
1084 printk(KERN_ERR "Illegal value %d\n", flag);
1085 return -1;
1086 }
1087
1088 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1089 printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1090 " UA_INTRLCK_CTRL while dev_export_obj: %d count"
1091 " exists\n", dev,
1092 atomic_read(&dev->dev_export_obj.obj_access_count));
1093 return -1;
1094 }
1095 DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
1096 printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1097 dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
1098
1099 return 0;
1100}
1101
1102int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1103{
1104 if ((flag != 0) && (flag != 1)) {
1105 printk(KERN_ERR "Illegal value %d\n", flag);
1106 return -1;
1107 }
1108
1109 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1110 printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
1111 " dev_export_obj: %d count exists\n", dev,
1112 atomic_read(&dev->dev_export_obj.obj_access_count));
1113 return -1;
1114 }
1115 DEV_ATTRIB(dev)->emulate_tas = flag;
1116 printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1117 dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
1118
1119 return 0;
1120}
1121
1122int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1123{
1124 if ((flag != 0) && (flag != 1)) {
1125 printk(KERN_ERR "Illegal value %d\n", flag);
1126 return -1;
1127 }
1128 /*
1129 * We expect this value to be non-zero when generic Block Layer
1130 * Discard supported is detected iblock_create_virtdevice().
1131 */
1132 if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
1133 printk(KERN_ERR "Generic Block Discard not supported\n");
1134 return -ENOSYS;
1135 }
1136
1137 DEV_ATTRIB(dev)->emulate_tpu = flag;
1138 printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1139 dev, flag);
1140 return 0;
1141}
1142
1143int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1144{
1145 if ((flag != 0) && (flag != 1)) {
1146 printk(KERN_ERR "Illegal value %d\n", flag);
1147 return -1;
1148 }
1149 /*
1150 * We expect this value to be non-zero when generic Block Layer
1151 * Discard supported is detected iblock_create_virtdevice().
1152 */
1153 if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
1154 printk(KERN_ERR "Generic Block Discard not supported\n");
1155 return -ENOSYS;
1156 }
1157
1158 DEV_ATTRIB(dev)->emulate_tpws = flag;
1159 printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1160 dev, flag);
1161 return 0;
1162}
1163
1164int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1165{
1166 if ((flag != 0) && (flag != 1)) {
1167 printk(KERN_ERR "Illegal value %d\n", flag);
1168 return -1;
1169 }
1170 DEV_ATTRIB(dev)->enforce_pr_isids = flag;
1171 printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1172 (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
1173 return 0;
1174}
1175
/*
 * Note, this can only be called on unexported SE Device Object.
 *
 * Change the device TCQ depth.  The new depth must be non-zero and,
 * depending on backend type, bounded by hw_queue_depth.  The device's
 * depth_left counter is adjusted by the delta from the old depth.
 * Returns 0 on success, -1 on rejection.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	if (!(queue_depth)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -1;
	}

	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		/* Passthrough pSCSI: hard-capped by the hardware queue depth. */
		if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				DEV_ATTRIB(dev)->hw_queue_depth);
			return -1;
		}
	} else {
		/*
		 * Virtual backends: only checked against hw_queue_depth when
		 * raising above the currently configured queue_depth.
		 */
		if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
			if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					DEV_ATTRIB(dev)->hw_queue_depth);
				return -1;
			}
		}
	}

	DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
	/* Adjust available depth credits by the change in configured depth. */
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}
1225
1226int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1227{
1228 int force = 0; /* Force setting for VDEVS */
1229
1230 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1231 printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1232 " max_sectors while dev_export_obj: %d count exists\n",
1233 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1234 return -1;
1235 }
1236 if (!(max_sectors)) {
1237 printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
1238 " max_sectors\n", dev);
1239 return -1;
1240 }
1241 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1242 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
1243 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
1244 DA_STATUS_MAX_SECTORS_MIN);
1245 return -1;
1246 }
1247 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1248 if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
1249 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1250 " greater than TCM/SE_Device max_sectors:"
1251 " %u\n", dev, max_sectors,
1252 DEV_ATTRIB(dev)->hw_max_sectors);
1253 return -1;
1254 }
1255 } else {
1256 if (!(force) && (max_sectors >
1257 DEV_ATTRIB(dev)->hw_max_sectors)) {
1258 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1259 " greater than TCM/SE_Device max_sectors"
1260 ": %u, use force=1 to override.\n", dev,
1261 max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
1262 return -1;
1263 }
1264 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1265 printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
1266 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1267 " %u\n", dev, max_sectors,
1268 DA_STATUS_MAX_SECTORS_MAX);
1269 return -1;
1270 }
1271 }
1272
1273 DEV_ATTRIB(dev)->max_sectors = max_sectors;
1274 printk("dev[%p]: SE Device max_sectors changed to %u\n",
1275 dev, max_sectors);
1276 return 0;
1277}
1278
1279int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1280{
1281 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1282 printk(KERN_ERR "dev[%p]: Unable to change SE Device"
1283 " optimal_sectors while dev_export_obj: %d count exists\n",
1284 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1285 return -EINVAL;
1286 }
1287 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1288 printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
1289 " changed for TCM/pSCSI\n", dev);
1290 return -EINVAL;
1291 }
1292 if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
1293 printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
1294 " greater than max_sectors: %u\n", dev,
1295 optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
1296 return -EINVAL;
1297 }
1298
1299 DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
1300 printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
1301 dev, optimal_sectors);
1302 return 0;
1303}
1304
1305int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1306{
1307 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1308 printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
1309 " while dev_export_obj: %d count exists\n", dev,
1310 atomic_read(&dev->dev_export_obj.obj_access_count));
1311 return -1;
1312 }
1313
1314 if ((block_size != 512) &&
1315 (block_size != 1024) &&
1316 (block_size != 2048) &&
1317 (block_size != 4096)) {
1318 printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
1319 " for SE device, must be 512, 1024, 2048 or 4096\n",
1320 dev, block_size);
1321 return -1;
1322 }
1323
1324 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1325 printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
1326 " Physical Device, use for Linux/SCSI to change"
1327 " block_size for underlying hardware\n", dev);
1328 return -1;
1329 }
1330
1331 DEV_ATTRIB(dev)->block_size = block_size;
1332 printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
1333 dev, block_size);
1334 return 0;
1335}
1336
/*
 * core_dev_add_lun():
 *
 * Export @dev as LUN @lun within @tpg and activate it, then fan the new
 * LUN out to any dynamically generated node ACLs when demo mode is on.
 *
 * Returns the activated struct se_lun, or NULL when the device is
 * already claimed via dev_access_obj or the TPG pre/post addlun setup
 * fails.  NOTE(review): all failures collapse to NULL, so the caller
 * cannot distinguish the underlying error.
 */
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	/* Refuse to export while another object holds device access */
	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if ((IS_ERR(lun_p)) || !(lun_p))
		return NULL;

	/* A read-only backend device forces a read-only LUN */
	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
		TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl) {
				/*
				 * The lock is dropped around the addlun call.
				 * NOTE(review): the list walk resumes from a
				 * node whose links may change while unlocked;
				 * confirm this is safe against concurrent ACL
				 * removal.
				 */
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}
1387
1388/* core_dev_del_lun():
1389 *
1390 *
1391 */
1392int core_dev_del_lun(
1393 struct se_portal_group *tpg,
1394 u32 unpacked_lun)
1395{
1396 struct se_lun *lun;
1397 int ret = 0;
1398
1399 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
1400 if (!(lun))
1401 return ret;
1402
1403 core_tpg_post_dellun(tpg, lun);
1404
1405 printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1406 " device object\n", TPG_TFO(tpg)->get_fabric_name(),
1407 TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
1408 TPG_TFO(tpg)->get_fabric_name());
1409
1410 return 0;
1411}
1412
1413struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1414{
1415 struct se_lun *lun;
1416
1417 spin_lock(&tpg->tpg_lun_lock);
1418 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1419 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1420 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1421 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1422 TRANSPORT_MAX_LUNS_PER_TPG-1,
1423 TPG_TFO(tpg)->tpg_get_tag(tpg));
1424 spin_unlock(&tpg->tpg_lun_lock);
1425 return NULL;
1426 }
1427 lun = &tpg->tpg_lun_list[unpacked_lun];
1428
1429 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1430 printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
1431 " Target Portal Group: %hu, ignoring request.\n",
1432 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1433 TPG_TFO(tpg)->tpg_get_tag(tpg));
1434 spin_unlock(&tpg->tpg_lun_lock);
1435 return NULL;
1436 }
1437 spin_unlock(&tpg->tpg_lun_lock);
1438
1439 return lun;
1440}
1441
1442/* core_dev_get_lun():
1443 *
1444 *
1445 */
1446static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1447{
1448 struct se_lun *lun;
1449
1450 spin_lock(&tpg->tpg_lun_lock);
1451 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1452 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1453 "_TPG-1: %u for Target Portal Group: %hu\n",
1454 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1455 TRANSPORT_MAX_LUNS_PER_TPG-1,
1456 TPG_TFO(tpg)->tpg_get_tag(tpg));
1457 spin_unlock(&tpg->tpg_lun_lock);
1458 return NULL;
1459 }
1460 lun = &tpg->tpg_lun_list[unpacked_lun];
1461
1462 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1463 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
1464 " Target Portal Group: %hu, ignoring request.\n",
1465 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
1466 TPG_TFO(tpg)->tpg_get_tag(tpg));
1467 spin_unlock(&tpg->tpg_lun_lock);
1468 return NULL;
1469 }
1470 spin_unlock(&tpg->tpg_lun_lock);
1471
1472 return lun;
1473}
1474
1475struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1476 struct se_portal_group *tpg,
1477 u32 mapped_lun,
1478 char *initiatorname,
1479 int *ret)
1480{
1481 struct se_lun_acl *lacl;
1482 struct se_node_acl *nacl;
1483
1484 if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
1485 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
1486 TPG_TFO(tpg)->get_fabric_name());
1487 *ret = -EOVERFLOW;
1488 return NULL;
1489 }
1490 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1491 if (!(nacl)) {
1492 *ret = -EINVAL;
1493 return NULL;
1494 }
1495 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1496 if (!(lacl)) {
1497 printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
1498 *ret = -ENOMEM;
1499 return NULL;
1500 }
1501
1502 INIT_LIST_HEAD(&lacl->lacl_list);
1503 lacl->mapped_lun = mapped_lun;
1504 lacl->se_lun_nacl = nacl;
1505 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
1506
1507 return lacl;
1508}
1509
/*
 * Wire an initialized struct se_lun_acl into @tpg's LUN @unpacked_lun
 * with @lun_access permissions, then enable any matching APTPL
 * persistent reservation pre-registrations.
 *
 * Returns 0 on success or -EINVAL when the LUN is not active, the ACL
 * has no backing node ACL, or the per-node device list update fails.
 */
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!(lun)) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	/* A read-only LUN always demotes a requested read-write mapping */
	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	/* Order the refcount bump with the list insertion before unlock */
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}
1560
1561/* core_dev_del_initiator_node_lun_acl():
1562 *
1563 *
1564 */
1565int core_dev_del_initiator_node_lun_acl(
1566 struct se_portal_group *tpg,
1567 struct se_lun *lun,
1568 struct se_lun_acl *lacl)
1569{
1570 struct se_node_acl *nacl;
1571
1572 nacl = lacl->se_lun_nacl;
1573 if (!(nacl))
1574 return -EINVAL;
1575
1576 spin_lock(&lun->lun_acl_lock);
1577 list_del(&lacl->lacl_list);
1578 atomic_dec(&lun->lun_acl_count);
1579 smp_mb__after_atomic_dec();
1580 spin_unlock(&lun->lun_acl_lock);
1581
1582 core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
1583 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
1584
1585 lacl->se_lun = NULL;
1586
1587 printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1588 " InitiatorNode: %s Mapped LUN: %u\n",
1589 TPG_TFO(tpg)->get_fabric_name(),
1590 TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
1591 lacl->initiatorname, lacl->mapped_lun);
1592
1593 return 0;
1594}
1595
1596void core_dev_free_initiator_node_lun_acl(
1597 struct se_portal_group *tpg,
1598 struct se_lun_acl *lacl)
1599{
1600 printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1601 " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
1602 TPG_TFO(tpg)->tpg_get_tag(tpg),
1603 TPG_TFO(tpg)->get_fabric_name(),
1604 lacl->initiatorname, lacl->mapped_lun);
1605
1606 kfree(lacl);
1607}
1608
1609int core_dev_setup_virtual_lun0(void)
1610{
1611 struct se_hba *hba;
1612 struct se_device *dev;
1613 struct se_subsystem_dev *se_dev = NULL;
1614 struct se_subsystem_api *t;
1615 char buf[16];
1616 int ret;
1617
1618 hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
1619 if (IS_ERR(hba))
1620 return PTR_ERR(hba);
1621
1622 se_global->g_lun0_hba = hba;
1623 t = hba->transport;
1624
1625 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1626 if (!(se_dev)) {
1627 printk(KERN_ERR "Unable to allocate memory for"
1628 " struct se_subsystem_dev\n");
1629 ret = -ENOMEM;
1630 goto out;
1631 }
1632 INIT_LIST_HEAD(&se_dev->g_se_dev_list);
1633 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1634 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1635 INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
1636 INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
1637 spin_lock_init(&se_dev->t10_reservation.registration_lock);
1638 spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
1639 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
1640 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
1641 spin_lock_init(&se_dev->se_dev_lock);
1642 se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1643 se_dev->t10_wwn.t10_sub_dev = se_dev;
1644 se_dev->t10_alua.t10_sub_dev = se_dev;
1645 se_dev->se_dev_attrib.da_sub_dev = se_dev;
1646 se_dev->se_dev_hba = hba;
1647
1648 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1649 if (!(se_dev->se_dev_su_ptr)) {
1650 printk(KERN_ERR "Unable to locate subsystem dependent pointer"
1651 " from allocate_virtdevice()\n");
1652 ret = -ENOMEM;
1653 goto out;
1654 }
1655 se_global->g_lun0_su_dev = se_dev;
1656
1657 memset(buf, 0, 16);
1658 sprintf(buf, "rd_pages=8");
1659 t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
1660
1661 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1662 if (!(dev) || IS_ERR(dev)) {
1663 ret = -ENOMEM;
1664 goto out;
1665 }
1666 se_dev->se_dev_ptr = dev;
1667 se_global->g_lun0_dev = dev;
1668
1669 return 0;
1670out:
1671 se_global->g_lun0_su_dev = NULL;
1672 kfree(se_dev);
1673 if (se_global->g_lun0_hba) {
1674 core_delete_hba(se_global->g_lun0_hba);
1675 se_global->g_lun0_hba = NULL;
1676 }
1677 return ret;
1678}
1679
1680
1681void core_dev_release_virtual_lun0(void)
1682{
1683 struct se_hba *hba = se_global->g_lun0_hba;
1684 struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
1685
1686 if (!(hba))
1687 return;
1688
1689 if (se_global->g_lun0_dev)
1690 se_free_virtual_device(se_global->g_lun0_dev, hba);
1691
1692 kfree(su_dev);
1693 core_delete_hba(hba);
1694}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 000000000000..32b148d7e261
--- /dev/null
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,996 @@
1/*******************************************************************************
2* Filename: target_core_fabric_configfs.c
3 *
4 * This file contains generic fabric module configfs infrastructure for
5 * TCM v4.x code
6 *
7 * Copyright (c) 2010 Rising Tide Systems
8 * Copyright (c) 2010 Linux-iSCSI.org
9 *
10 * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
11*
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 ****************************************************************************/
22
23#include <linux/module.h>
24#include <linux/moduleparam.h>
25#include <linux/version.h>
26#include <generated/utsrelease.h>
27#include <linux/utsname.h>
28#include <linux/init.h>
29#include <linux/fs.h>
30#include <linux/namei.h>
31#include <linux/slab.h>
32#include <linux/types.h>
33#include <linux/delay.h>
34#include <linux/unistd.h>
35#include <linux/string.h>
36#include <linux/syscalls.h>
37#include <linux/configfs.h>
38
39#include <target/target_core_base.h>
40#include <target/target_core_device.h>
41#include <target/target_core_tpg.h>
42#include <target/target_core_transport.h>
43#include <target/target_core_fabric_ops.h>
44#include <target/target_core_fabric_configfs.h>
45#include <target/target_core_configfs.h>
46#include <target/configfs_macros.h>
47
48#include "target_core_alua.h"
49#include "target_core_hba.h"
50#include "target_core_pr.h"
51
/*
 * TF_CIT_SETUP() - generate a setup function that populates one
 * config_item_type slot inside a fabric's tf_cit_tmpl template with the
 * given item ops, group ops and attribute array.
 */
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
{									\
	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;	\
	struct config_item_type *cit = &tfc->tfc_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tf->tf_module;					\
	/* KERN_INFO added: the original printk() had no log level */	\
	printk(KERN_INFO "Setup generic %s\n", __stringify(_name));	\
}
64
65/* Start of tfc_tpg_mappedlun_cit */
66
/*
 * configfs ->allow_link() for a MappedLUN: symlinking
 * acls/$INITIATOR/lun_$ID -> ../../lun/lun_$N creates the initiator's
 * mapping to that TPG LUN.
 *
 * Validates that the link stays within the same $FABRIC/$WWN/tpgt_$TPGT
 * hierarchy, then registers the LUN ACL with access flags derived from
 * any pre-existing (dynamically generated) device entry.
 *
 * Returns 0 on success or -EINVAL on any validation/registration
 * failure.
 */
static int target_fabric_mappedlun_link(
	struct config_item *lun_acl_ci,
	struct config_item *lun_ci)
{
	struct se_dev_entry *deve;
	struct se_lun *lun = container_of(to_config_group(lun_ci),
			struct se_lun, lun_group);
	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
			struct se_lun_acl, se_lun_group);
	struct se_portal_group *se_tpg;
	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
	int ret = 0, lun_access;
	/*
	 * Ensure that the source port exists
	 */
	if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
		printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
			"_tpg does not exist\n");
		return -EINVAL;
	}
	se_tpg = lun->lun_sep->sep_tpg;

	/* Walk up the configfs tree on both sides of the symlink */
	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
	tpg_ci = &nacl_ci->ci_group->cg_item;
	wwn_ci = &tpg_ci->ci_group->cg_item;
	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
	/*
	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
	 */
	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
		printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
			config_item_name(wwn_ci));
		return -EINVAL;
	}
	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
		printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
			" TPGT: %s\n", config_item_name(wwn_ci),
			config_item_name(tpg_ci));
		return -EINVAL;
	}
	/*
	 * If this struct se_node_acl was dynamically generated with
	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
	 * which will be write protected (READ-ONLY) when
	 * tpg_1/attrib/demo_mode_write_protect=1
	 */
	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
	deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
		lun_access = deve->lun_flags;
	else
		lun_access =
			(TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
					   TRANSPORT_LUNFLAGS_READ_WRITE;
	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
	/*
	 * Determine the actual mapped LUN value user wants..
	 *
	 * This value is what the SCSI Initiator actually sees the
	 * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
	 */
	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
			lun->unpacked_lun, lun_access);

	return (ret < 0) ? -EINVAL : 0;
}
135
/*
 * configfs ->drop_link() for a MappedLUN: removing the symlink tears
 * down the initiator's LUN ACL mapping.
 *
 * NOTE(review): deve->se_lun is read without device_list_lock here --
 * presumably configfs serializes link/unlink on this item; confirm.
 */
static int target_fabric_mappedlun_unlink(
	struct config_item *lun_acl_ci,
	struct config_item *lun_ci)
{
	struct se_lun *lun;
	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
			struct se_lun_acl, se_lun_group);
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
	struct se_portal_group *se_tpg;
	/*
	 * Determine if the underlying MappedLUN has already been released..
	 */
	if (!(deve->se_lun))
		return 0;

	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
	se_tpg = lun->lun_sep->sep_tpg;

	core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
	return 0;
}
158
/* Generate struct target_fabric_mappedlun_attribute for se_lun_acl */
CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
/*
 * Declare one mappedlun attribute wired to the matching
 * target_fabric_mappedlun_show/store_##_name handlers.
 */
#define TCM_MAPPEDLUN_ATTR(_name, _mode)				\
static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	target_fabric_mappedlun_show_##_name,				\
	target_fabric_mappedlun_store_##_name);
165
166static ssize_t target_fabric_mappedlun_show_write_protect(
167 struct se_lun_acl *lacl,
168 char *page)
169{
170 struct se_node_acl *se_nacl = lacl->se_lun_nacl;
171 struct se_dev_entry *deve;
172 ssize_t len;
173
174 spin_lock_irq(&se_nacl->device_list_lock);
175 deve = &se_nacl->device_list[lacl->mapped_lun];
176 len = sprintf(page, "%d\n",
177 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
178 1 : 0);
179 spin_unlock_irq(&se_nacl->device_list_lock);
180
181 return len;
182}
183
184static ssize_t target_fabric_mappedlun_store_write_protect(
185 struct se_lun_acl *lacl,
186 const char *page,
187 size_t count)
188{
189 struct se_node_acl *se_nacl = lacl->se_lun_nacl;
190 struct se_portal_group *se_tpg = se_nacl->se_tpg;
191 unsigned long op;
192
193 if (strict_strtoul(page, 0, &op))
194 return -EINVAL;
195
196 if ((op != 1) && (op != 0))
197 return -EINVAL;
198
199 core_update_device_list_access(lacl->mapped_lun, (op) ?
200 TRANSPORT_LUNFLAGS_READ_ONLY :
201 TRANSPORT_LUNFLAGS_READ_WRITE,
202 lacl->se_lun_nacl);
203
204 printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
205 " Mapped LUN: %u Write Protect bit to %s\n",
206 TPG_TFO(se_tpg)->get_fabric_name(),
207 lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
208
209 return count;
210
211}
212
/* Instantiate the write_protect attribute (owner rw, world read) */
TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);

/* Generates the attr show/store dispatchers used by the item_ops below */
CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);

/* NULL-terminated attribute list for the mappedlun config group */
static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
	&target_fabric_mappedlun_write_protect.attr,
	NULL,
};

/*
 * Symlinking to a TPG LUN creates the mapping (allow_link); removing
 * the symlink tears it down (drop_link).
 */
static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
	.show_attribute = target_fabric_mappedlun_attr_show,
	.store_attribute = target_fabric_mappedlun_attr_store,
	.allow_link = target_fabric_mappedlun_link,
	.drop_link = target_fabric_mappedlun_unlink,
};

TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
		target_fabric_mappedlun_attrs);
231
232/* End of tfc_tpg_mappedlun_cit */
233
/* Start of tfc_tpg_nacl_attrib_cit */

/* attr show/store dispatch for the per-nacl attrib/ group */
CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);

static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
	.show_attribute = target_fabric_nacl_attrib_attr_show,
	.store_attribute = target_fabric_nacl_attrib_attr_store,
};

/* Attributes are supplied later by the fabric module, hence NULL here */
TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);

/* End of tfc_tpg_nacl_attrib_cit */

/* Start of tfc_tpg_nacl_auth_cit */

/* attr show/store dispatch for the per-nacl auth/ group */
CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);

static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
	.show_attribute = target_fabric_nacl_auth_attr_show,
	.store_attribute = target_fabric_nacl_auth_attr_store,
};

TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);

/* End of tfc_tpg_nacl_auth_cit */

/* Start of tfc_tpg_nacl_param_cit */

/* attr show/store dispatch for the per-nacl param/ group */
CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);

static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
	.show_attribute = target_fabric_nacl_param_attr_show,
	.store_attribute = target_fabric_nacl_param_attr_store,
};

TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);

/* End of tfc_tpg_nacl_param_cit */

/* Start of tfc_tpg_nacl_base_cit */

/* attr show/store dispatch for the nacl base group itself */
CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
276
277static struct config_group *target_fabric_make_mappedlun(
278 struct config_group *group,
279 const char *name)
280{
281 struct se_node_acl *se_nacl = container_of(group,
282 struct se_node_acl, acl_group);
283 struct se_portal_group *se_tpg = se_nacl->se_tpg;
284 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
285 struct se_lun_acl *lacl;
286 struct config_item *acl_ci;
287 char *buf;
288 unsigned long mapped_lun;
289 int ret = 0;
290
291 acl_ci = &group->cg_item;
292 if (!(acl_ci)) {
293 printk(KERN_ERR "Unable to locatel acl_ci\n");
294 return NULL;
295 }
296
297 buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
298 if (!(buf)) {
299 printk(KERN_ERR "Unable to allocate memory for name buf\n");
300 return ERR_PTR(-ENOMEM);
301 }
302 snprintf(buf, strlen(name) + 1, "%s", name);
303 /*
304 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
305 */
306 if (strstr(buf, "lun_") != buf) {
307 printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
308 " name: %s\n", buf, name);
309 ret = -EINVAL;
310 goto out;
311 }
312 /*
313 * Determine the Mapped LUN value. This is what the SCSI Initiator
314 * Port will actually see.
315 */
316 if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
317 ret = -EINVAL;
318 goto out;
319 }
320
321 lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
322 config_item_name(acl_ci), &ret);
323 if (!(lacl))
324 goto out;
325
326 config_group_init_type_name(&lacl->se_lun_group, name,
327 &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
328
329 kfree(buf);
330 return &lacl->se_lun_group;
331out:
332 kfree(buf);
333 return ERR_PTR(ret);
334}
335
336static void target_fabric_drop_mappedlun(
337 struct config_group *group,
338 struct config_item *item)
339{
340 struct se_lun_acl *lacl = container_of(to_config_group(item),
341 struct se_lun_acl, se_lun_group);
342 struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
343
344 config_item_put(item);
345 core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
346}
347
/*
 * Base nacl ops: attribute show/store plus mapped LUN group creation
 * and removal through make_group/drop_item.
 */
static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
	.show_attribute = target_fabric_nacl_base_attr_show,
	.store_attribute = target_fabric_nacl_base_attr_store,
};

static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
	.make_group = target_fabric_make_mappedlun,
	.drop_item = target_fabric_drop_mappedlun,
};

TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
		&target_fabric_nacl_base_group_ops, NULL);
360
361/* End of tfc_tpg_nacl_base_cit */
362
363/* Start of tfc_tpg_nacl_cit */
364
365static struct config_group *target_fabric_make_nodeacl(
366 struct config_group *group,
367 const char *name)
368{
369 struct se_portal_group *se_tpg = container_of(group,
370 struct se_portal_group, tpg_acl_group);
371 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
372 struct se_node_acl *se_nacl;
373 struct config_group *nacl_cg;
374
375 if (!(tf->tf_ops.fabric_make_nodeacl)) {
376 printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
377 return ERR_PTR(-ENOSYS);
378 }
379
380 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
381 if (IS_ERR(se_nacl))
382 return ERR_PTR(PTR_ERR(se_nacl));
383
384 nacl_cg = &se_nacl->acl_group;
385 nacl_cg->default_groups = se_nacl->acl_default_groups;
386 nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
387 nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
388 nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
389 nacl_cg->default_groups[3] = NULL;
390
391 config_group_init_type_name(&se_nacl->acl_group, name,
392 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
393 config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
394 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
395 config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
396 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
397 config_group_init_type_name(&se_nacl->acl_param_group, "param",
398 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
399
400 return &se_nacl->acl_group;
401}
402
/*
 * configfs ->drop_item() for a node ACL: release the default
 * sub-groups, drop the item reference, then let the fabric module free
 * the ACL.
 */
static void target_fabric_drop_nodeacl(
	struct config_group *group,
	struct config_item *item)
{
	struct se_portal_group *se_tpg = container_of(group,
			struct se_portal_group, tpg_acl_group);
	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
	struct se_node_acl *se_nacl = container_of(to_config_group(item),
			struct se_node_acl, acl_group);
	struct config_item *df_item;
	struct config_group *nacl_cg;
	int i;

	/* Clear each slot before putting it so it is released exactly once */
	nacl_cg = &se_nacl->acl_group;
	for (i = 0; nacl_cg->default_groups[i]; i++) {
		df_item = &nacl_cg->default_groups[i]->cg_item;
		nacl_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}

	config_item_put(item);
	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
}
426
/* Node ACL directories under acls/ are created/removed by these ops */
static struct configfs_group_operations target_fabric_nacl_group_ops = {
	.make_group = target_fabric_make_nodeacl,
	.drop_item = target_fabric_drop_nodeacl,
};

TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);

/* End of tfc_tpg_nacl_cit */

/* Start of tfc_tpg_np_base_cit */

/* attr show/store dispatch for a single network portal group */
CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);

static struct configfs_item_operations target_fabric_np_base_item_ops = {
	.show_attribute = target_fabric_np_base_attr_show,
	.store_attribute = target_fabric_np_base_attr_store,
};

TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
446
447/* End of tfc_tpg_np_base_cit */
448
449/* Start of tfc_tpg_np_cit */
450
451static struct config_group *target_fabric_make_np(
452 struct config_group *group,
453 const char *name)
454{
455 struct se_portal_group *se_tpg = container_of(group,
456 struct se_portal_group, tpg_np_group);
457 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
458 struct se_tpg_np *se_tpg_np;
459
460 if (!(tf->tf_ops.fabric_make_np)) {
461 printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
462 return ERR_PTR(-ENOSYS);
463 }
464
465 se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
466 if (!(se_tpg_np) || IS_ERR(se_tpg_np))
467 return ERR_PTR(-EINVAL);
468
469 config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
470 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
471
472 return &se_tpg_np->tpg_np_group;
473}
474
475static void target_fabric_drop_np(
476 struct config_group *group,
477 struct config_item *item)
478{
479 struct se_portal_group *se_tpg = container_of(group,
480 struct se_portal_group, tpg_np_group);
481 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
482 struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
483 struct se_tpg_np, tpg_np_group);
484
485 config_item_put(item);
486 tf->tf_ops.fabric_drop_np(se_tpg_np);
487}
488
489static struct configfs_group_operations target_fabric_np_group_ops = {
490 .make_group = &target_fabric_make_np,
491 .drop_item = &target_fabric_drop_np,
492};
493
494TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
495
496/* End of tfc_tpg_np_cit */
497
/* Start of tfc_tpg_port_cit */

/* Generate struct target_fabric_port_attribute for se_lun */
CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
/* Declare one rw port attribute wired to its show/store handlers */
#define TCM_PORT_ATTR(_name, _mode)					\
static struct target_fabric_port_attribute target_fabric_port_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	target_fabric_port_show_attr_##_name,				\
	target_fabric_port_store_attr_##_name);

/*
 * NOTE(review): "ATTOR" looks like a typo for "ATTR", but the macro may
 * be referenced later in this file (outside this view), so it is left
 * unrenamed.
 */
#define TCM_PORT_ATTOR_RO(_name)					\
	__CONFIGFS_EATTR_RO(_name,					\
	target_fabric_port_show_attr_##_name);
510
511/*
512 * alua_tg_pt_gp
513 */
514static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
515 struct se_lun *lun,
516 char *page)
517{
518 if (!(lun))
519 return -ENODEV;
520
521 if (!(lun->lun_sep))
522 return -ENODEV;
523
524 return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
525}
526
527static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
528 struct se_lun *lun,
529 const char *page,
530 size_t count)
531{
532 if (!(lun))
533 return -ENODEV;
534
535 if (!(lun->lun_sep))
536 return -ENODEV;
537
538 return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
539}
540
541TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
542
543/*
544 * alua_tg_pt_offline
545 */
546static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
547 struct se_lun *lun,
548 char *page)
549{
550 if (!(lun))
551 return -ENODEV;
552
553 if (!(lun->lun_sep))
554 return -ENODEV;
555
556 return core_alua_show_offline_bit(lun, page);
557}
558
559static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
560 struct se_lun *lun,
561 const char *page,
562 size_t count)
563{
564 if (!(lun))
565 return -ENODEV;
566
567 if (!(lun->lun_sep))
568 return -ENODEV;
569
570 return core_alua_store_offline_bit(lun, page, count);
571}
572
573TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
574
575/*
576 * alua_tg_pt_status
577 */
578static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
579 struct se_lun *lun,
580 char *page)
581{
582 if (!(lun))
583 return -ENODEV;
584
585 if (!(lun->lun_sep))
586 return -ENODEV;
587
588 return core_alua_show_secondary_status(lun, page);
589}
590
591static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
592 struct se_lun *lun,
593 const char *page,
594 size_t count)
595{
596 if (!(lun))
597 return -ENODEV;
598
599 if (!(lun->lun_sep))
600 return -ENODEV;
601
602 return core_alua_store_secondary_status(lun, page, count);
603}
604
605TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
606
607/*
608 * alua_tg_pt_write_md
609 */
610static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
611 struct se_lun *lun,
612 char *page)
613{
614 if (!(lun))
615 return -ENODEV;
616
617 if (!(lun->lun_sep))
618 return -ENODEV;
619
620 return core_alua_show_secondary_write_metadata(lun, page);
621}
622
623static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
624 struct se_lun *lun,
625 const char *page,
626 size_t count)
627{
628 if (!(lun))
629 return -ENODEV;
630
631 if (!(lun->lun_sep))
632 return -ENODEV;
633
634 return core_alua_store_secondary_write_metadata(lun, page, count);
635}
636
637TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
638
639
/* configfs attributes exported under each fabric port (LUN) group */
static struct configfs_attribute *target_fabric_port_attrs[] = {
	&target_fabric_port_alua_tg_pt_gp.attr,
	&target_fabric_port_alua_tg_pt_offline.attr,
	&target_fabric_port_alua_tg_pt_status.attr,
	&target_fabric_port_alua_tg_pt_write_md.attr,
	NULL,
};

/* Generates target_fabric_port_attr_show()/_store() dispatchers for se_lun */
CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
649
650static int target_fabric_port_link(
651 struct config_item *lun_ci,
652 struct config_item *se_dev_ci)
653{
654 struct config_item *tpg_ci;
655 struct se_device *dev;
656 struct se_lun *lun = container_of(to_config_group(lun_ci),
657 struct se_lun, lun_group);
658 struct se_lun *lun_p;
659 struct se_portal_group *se_tpg;
660 struct se_subsystem_dev *se_dev = container_of(
661 to_config_group(se_dev_ci), struct se_subsystem_dev,
662 se_dev_group);
663 struct target_fabric_configfs *tf;
664 int ret;
665
666 tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
667 se_tpg = container_of(to_config_group(tpg_ci),
668 struct se_portal_group, tpg_group);
669 tf = se_tpg->se_tpg_wwn->wwn_tf;
670
671 if (lun->lun_se_dev != NULL) {
672 printk(KERN_ERR "Port Symlink already exists\n");
673 return -EEXIST;
674 }
675
676 dev = se_dev->se_dev_ptr;
677 if (!(dev)) {
678 printk(KERN_ERR "Unable to locate struct se_device pointer from"
679 " %s\n", config_item_name(se_dev_ci));
680 ret = -ENODEV;
681 goto out;
682 }
683
684 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
685 lun->unpacked_lun);
686 if ((IS_ERR(lun_p)) || !(lun_p)) {
687 printk(KERN_ERR "core_dev_add_lun() failed\n");
688 ret = -EINVAL;
689 goto out;
690 }
691
692 if (tf->tf_ops.fabric_post_link) {
693 /*
694 * Call the optional fabric_post_link() to allow a
695 * fabric module to setup any additional state once
696 * core_dev_add_lun() has been called..
697 */
698 tf->tf_ops.fabric_post_link(se_tpg, lun);
699 }
700
701 return 0;
702out:
703 return ret;
704}
705
706static int target_fabric_port_unlink(
707 struct config_item *lun_ci,
708 struct config_item *se_dev_ci)
709{
710 struct se_lun *lun = container_of(to_config_group(lun_ci),
711 struct se_lun, lun_group);
712 struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
713 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
714
715 if (tf->tf_ops.fabric_pre_unlink) {
716 /*
717 * Call the optional fabric_pre_unlink() to allow a
718 * fabric module to release any additional stat before
719 * core_dev_del_lun() is called.
720 */
721 tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
722 }
723
724 core_dev_del_lun(se_tpg, lun->unpacked_lun);
725 return 0;
726}
727
/* Item operations for the per-port (LUN) configfs group */
static struct configfs_item_operations target_fabric_port_item_ops = {
	.show_attribute = target_fabric_port_attr_show,
	.store_attribute = target_fabric_port_attr_store,
	.allow_link = target_fabric_port_link,
	.drop_link = target_fabric_port_unlink,
};

/* Register the tpg_port config_item_type in the fabric template */
TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);

/* End of tfc_tpg_port_cit */
738
739/* Start of tfc_tpg_lun_cit */
740
741static struct config_group *target_fabric_make_lun(
742 struct config_group *group,
743 const char *name)
744{
745 struct se_lun *lun;
746 struct se_portal_group *se_tpg = container_of(group,
747 struct se_portal_group, tpg_lun_group);
748 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
749 unsigned long unpacked_lun;
750
751 if (strstr(name, "lun_") != name) {
752 printk(KERN_ERR "Unable to locate \'_\" in"
753 " \"lun_$LUN_NUMBER\"\n");
754 return ERR_PTR(-EINVAL);
755 }
756 if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
757 return ERR_PTR(-EINVAL);
758
759 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
760 if (!(lun))
761 return ERR_PTR(-EINVAL);
762
763 config_group_init_type_name(&lun->lun_group, name,
764 &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
765
766 return &lun->lun_group;
767}
768
/* configfs ->drop_item(): release the reference taken at mkdir time */
static void target_fabric_drop_lun(
	struct config_group *group,
	struct config_item *item)
{
	config_item_put(item);
}

static struct configfs_group_operations target_fabric_lun_group_ops = {
	.make_group = &target_fabric_make_lun,
	.drop_item = &target_fabric_drop_lun,
};

/* Register the tpg_lun config_item_type in the fabric template */
TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);

/* End of tfc_tpg_lun_cit */
784
/* Start of tfc_tpg_attrib_cit */

/* Attribute show/store dispatchers for the per-TPG "attrib" group */
CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);

static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
	.show_attribute = target_fabric_tpg_attrib_attr_show,
	.store_attribute = target_fabric_tpg_attrib_attr_store,
};

TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);

/* End of tfc_tpg_attrib_cit */

/* Start of tfc_tpg_param_cit */

/* Attribute show/store dispatchers for the per-TPG "param" group */
CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);

static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
	.show_attribute = target_fabric_tpg_param_attr_show,
	.store_attribute = target_fabric_tpg_param_attr_store,
};

TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);

/* End of tfc_tpg_param_cit */

/* Start of tfc_tpg_base_cit */
/*
 * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
 */
CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);

static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
	.show_attribute = target_fabric_tpg_attr_show,
	.store_attribute = target_fabric_tpg_attr_store,
};

TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);

/* End of tfc_tpg_base_cit */
825
826/* Start of tfc_tpg_cit */
827
/*
 * configfs ->make_group() for $FABRIC/$WWN/: asks the fabric module to
 * allocate a new se_portal_group, then wires up the standard default
 * sub-groups (lun/, np/, acls/, attrib/, param/) under the new TPG.
 *
 * NOTE(review): an ERR_PTR() from fabric_make_tpg() is collapsed into
 * -EINVAL here, discarding the fabric's specific error code.
 */
static struct config_group *target_fabric_make_tpg(
	struct config_group *group,
	const char *name)
{
	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
	struct target_fabric_configfs *tf = wwn->wwn_tf;
	struct se_portal_group *se_tpg;

	/* fabric_make_tpg() is a mandatory fabric module callback */
	if (!(tf->tf_ops.fabric_make_tpg)) {
		printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
		return ERR_PTR(-ENOSYS);
	}

	se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
	if (!(se_tpg) || IS_ERR(se_tpg))
		return ERR_PTR(-EINVAL);
	/*
	 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
	 */
	se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
	se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
	se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
	se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
	se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
	se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
	se_tpg->tpg_group.default_groups[5] = NULL;

	/* Bind each group to the matching config_item_type from the template */
	config_group_init_type_name(&se_tpg->tpg_group, name,
			&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
	config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
			&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
	config_group_init_type_name(&se_tpg->tpg_np_group, "np",
			&TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
	config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
	config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
			&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
	config_group_init_type_name(&se_tpg->tpg_param_group, "param",
			&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);

	return &se_tpg->tpg_group;
}
870
/*
 * configfs ->drop_item() for a TPG: drop references on the default
 * sub-groups first, then the TPG itself, and finally let the fabric
 * module free the se_portal_group.  The ordering here matters.
 */
static void target_fabric_drop_tpg(
	struct config_group *group,
	struct config_item *item)
{
	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
	struct target_fabric_configfs *tf = wwn->wwn_tf;
	struct se_portal_group *se_tpg = container_of(to_config_group(item),
				struct se_portal_group, tpg_group);
	struct config_group *tpg_cg = &se_tpg->tpg_group;
	struct config_item *df_item;
	int i;
	/*
	 * Release default groups, but do not release tpg_cg->default_groups
	 * memory as it is statically allocated at se_tpg->tpg_default_groups.
	 */
	for (i = 0; tpg_cg->default_groups[i]; i++) {
		df_item = &tpg_cg->default_groups[i]->cg_item;
		tpg_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}

	config_item_put(item);
	tf->tf_ops.fabric_drop_tpg(se_tpg);
}
895
/* Group operations for the $FABRIC/$WWN/ directory (mkdir/rmdir of TPGs) */
static struct configfs_group_operations target_fabric_tpg_group_ops = {
	.make_group = target_fabric_make_tpg,
	.drop_item = target_fabric_drop_tpg,
};

TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);

/* End of tfc_tpg_cit */
904
905/* Start of tfc_wwn_cit */
906
907static struct config_group *target_fabric_make_wwn(
908 struct config_group *group,
909 const char *name)
910{
911 struct target_fabric_configfs *tf = container_of(group,
912 struct target_fabric_configfs, tf_group);
913 struct se_wwn *wwn;
914
915 if (!(tf->tf_ops.fabric_make_wwn)) {
916 printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
917 return ERR_PTR(-ENOSYS);
918 }
919
920 wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
921 if (!(wwn) || IS_ERR(wwn))
922 return ERR_PTR(-EINVAL);
923
924 wwn->wwn_tf = tf;
925 config_group_init_type_name(&wwn->wwn_group, name,
926 &TF_CIT_TMPL(tf)->tfc_tpg_cit);
927
928 return &wwn->wwn_group;
929}
930
/*
 * configfs ->drop_item() for a WWN endpoint: drop the configfs reference,
 * then let the fabric module free the se_wwn (in that order).
 */
static void target_fabric_drop_wwn(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(group,
				struct target_fabric_configfs, tf_group);
	struct se_wwn *wwn = container_of(to_config_group(item),
				struct se_wwn, wwn_group);

	config_item_put(item);
	tf->tf_ops.fabric_drop_wwn(wwn);
}
943
/* Group operations for the fabric top-level dir (mkdir/rmdir of WWNs) */
static struct configfs_group_operations target_fabric_wwn_group_ops = {
	.make_group = target_fabric_make_wwn,
	.drop_item = target_fabric_drop_wwn,
};
/*
 * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
 */
CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);

static struct configfs_item_operations target_fabric_wwn_item_ops = {
	.show_attribute = target_fabric_wwn_attr_show,
	.store_attribute = target_fabric_wwn_attr_store,
};

TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);

/* End of tfc_wwn_cit */
961
/* Start of tfc_discovery_cit */

/* Attribute show/store dispatchers for the fabric "discovery_auth" group */
CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
		tf_disc_group);

static struct configfs_item_operations target_fabric_discovery_item_ops = {
	.show_attribute = target_fabric_discovery_attr_show,
	.store_attribute = target_fabric_discovery_attr_store,
};

TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);

/* End of tfc_discovery_cit */
975
/*
 * Initialize every config_item_type in the fabric's template.  Called
 * once per fabric module before its configfs tree is exposed.  Always
 * returns 0.
 */
int target_fabric_setup_cits(struct target_fabric_configfs *tf)
{
	target_fabric_setup_discovery_cit(tf);
	target_fabric_setup_wwn_cit(tf);
	target_fabric_setup_tpg_cit(tf);
	target_fabric_setup_tpg_base_cit(tf);
	target_fabric_setup_tpg_port_cit(tf);
	target_fabric_setup_tpg_lun_cit(tf);
	target_fabric_setup_tpg_np_cit(tf);
	target_fabric_setup_tpg_np_base_cit(tf);
	target_fabric_setup_tpg_attrib_cit(tf);
	target_fabric_setup_tpg_param_cit(tf);
	target_fabric_setup_tpg_nacl_cit(tf);
	target_fabric_setup_tpg_nacl_base_cit(tf);
	target_fabric_setup_tpg_nacl_attrib_cit(tf);
	target_fabric_setup_tpg_nacl_auth_cit(tf);
	target_fabric_setup_tpg_nacl_param_cit(tf);
	target_fabric_setup_tpg_mappedlun_cit(tf);

	return 0;
}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 000000000000..26285644e4de
--- /dev/null
+++ b/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,451 @@
1/*******************************************************************************
2 * Filename: target_core_fabric_lib.c
3 *
4 * This file contains generic high level protocol identifier and PR
5 * handlers for TCM fabric modules
6 *
7 * Copyright (c) 2010 Rising Tide Systems, Inc.
8 * Copyright (c) 2010 Linux-iSCSI.org
9 *
10 * Nicholas A. Bellinger <nab@linux-iscsi.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 *
26 ******************************************************************************/
27
28#include <linux/string.h>
29#include <linux/ctype.h>
30#include <linux/spinlock.h>
31#include <linux/smp_lock.h>
32#include <scsi/scsi.h>
33#include <scsi/scsi_cmnd.h>
34
35#include <target/target_core_base.h>
36#include <target/target_core_device.h>
37#include <target/target_core_transport.h>
38#include <target/target_core_fabric_ops.h>
39#include <target/target_core_configfs.h>
40
41#include "target_core_hba.h"
42#include "target_core_pr.h"
43
/*
 * Handlers for Serial Attached SCSI (SAS)
 */
/* se_tpg is unused; the SAS protocol identifier is a fixed value. */
u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	/*
	 * Return a SAS Serial SCSI Protocol identifier for loopback operations
	 * This is defined in section 7.5.1 Table 362 in spc4r17
	 */
	return 0x6;
}
EXPORT_SYMBOL(sas_get_fabric_proto_ident);
56
57u32 sas_get_pr_transport_id(
58 struct se_portal_group *se_tpg,
59 struct se_node_acl *se_nacl,
60 struct t10_pr_registration *pr_reg,
61 int *format_code,
62 unsigned char *buf)
63{
64 unsigned char binary, *ptr;
65 int i;
66 u32 off = 4;
67 /*
68 * Set PROTOCOL IDENTIFIER to 6h for SAS
69 */
70 buf[0] = 0x06;
71 /*
72 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
73 * over SAS Serial SCSI Protocol
74 */
75 ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
76
77 for (i = 0; i < 16; i += 2) {
78 binary = transport_asciihex_to_binaryhex(&ptr[i]);
79 buf[off++] = binary;
80 }
81 /*
82 * The SAS Transport ID is a hardcoded 24-byte length
83 */
84 return 24;
85}
86EXPORT_SYMBOL(sas_get_pr_transport_id);
87
/* Report the fixed SAS TransportID length; format code is always 00b. */
u32 sas_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	*format_code = 0;
	/*
	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
	 * over SAS Serial SCSI Protocol
	 *
	 * The SAS Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}
EXPORT_SYMBOL(sas_get_pr_transport_id_len);
104
/*
 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 *
 * Returns a pointer into @buf at the start of the SAS address bytes.
 */
char *sas_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	/*
	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
	 * for initiator ports using SCSI over SAS Serial SCSI Protocol
	 *
	 * The TransportID for a SAS Initiator Port is of fixed size of
	 * 24 bytes, and SAS does not contain a I_T nexus identifier,
	 * so we return the **port_nexus_ptr set to NULL.
	 */
	*port_nexus_ptr = NULL;
	*out_tid_len = 24;

	return (char *)&buf[4];
}
EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
129
/*
 * Handlers for Fibre Channel Protocol (FCP)
 */
/* se_tpg is unused; the FCP protocol identifier is a fixed value. */
u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	return 0x0;	/* 0 = fcp-2 per SPC4 section 7.5.1 */
}
EXPORT_SYMBOL(fc_get_fabric_proto_ident);
138
/* Report the fixed FC TransportID length; format code is always 00b. */
u32 fc_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	*format_code = 0;
	/*
	 * The FC Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}
EXPORT_SYMBOL(fc_get_pr_transport_id_len);
152
153u32 fc_get_pr_transport_id(
154 struct se_portal_group *se_tpg,
155 struct se_node_acl *se_nacl,
156 struct t10_pr_registration *pr_reg,
157 int *format_code,
158 unsigned char *buf)
159{
160 unsigned char binary, *ptr;
161 int i;
162 u32 off = 8;
163 /*
164 * PROTOCOL IDENTIFIER is 0h for FCP-2
165 *
166 * From spc4r17, 7.5.4.2 TransportID for initiator ports using
167 * SCSI over Fibre Channel
168 *
169 * We convert the ASCII formatted N Port name into a binary
170 * encoded TransportID.
171 */
172 ptr = &se_nacl->initiatorname[0];
173
174 for (i = 0; i < 24; ) {
175 if (!(strncmp(&ptr[i], ":", 1))) {
176 i++;
177 continue;
178 }
179 binary = transport_asciihex_to_binaryhex(&ptr[i]);
180 buf[off++] = binary;
181 i += 2;
182 }
183 /*
184 * The FC Transport ID is a hardcoded 24-byte length
185 */
186 return 24;
187}
188EXPORT_SYMBOL(fc_get_pr_transport_id);
189
/* Returns a pointer into @buf at the start of the binary N Port name. */
char *fc_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	/*
	 * The TransportID for a FC N Port is of fixed size of
	 * 24 bytes, and FC does not contain a I_T nexus identifier,
	 * so we return the **port_nexus_ptr set to NULL.
	 */
	*port_nexus_ptr = NULL;
	*out_tid_len = 24;

	return (char *)&buf[8];
}
EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
207
/*
 * Handlers for Internet Small Computer Systems Interface (iSCSI)
 */

/* se_tpg is unused; the iSCSI protocol identifier is a fixed value. */
u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	/*
	 * This value is defined for "Internet SCSI (iSCSI)"
	 * in spc4r17 section 7.5.1 Table 362
	 */
	return 0x5;
}
EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
221
/*
 * Build an iSCSI TransportID (spc4r17, 7.5.4.6) into @buf from the
 * ACL's initiator name and, for *format_code == 1 with a registered
 * ISID, the ",i,0x" + ISID suffix (Table 390).  Returns the total
 * TransportID length including the 4-byte header.
 *
 * NOTE(review): in the ISID branch, bytes are written at buf[off+len]
 * while off is also post-incremented and len bumped afterwards; the
 * resulting byte offsets interleave in a non-obvious way — verify the
 * produced layout against spc4r17 Table 390 before modifying.
 */
u32 iscsi_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	u32 off = 4, padding = 0;
	u16 len = 0;

	/* nacl_sess_lock guards initiatorname while we format it */
	spin_lock_irq(&se_nacl->nacl_sess_lock);
	/*
	 * Set PROTOCOL IDENTIFIER to 5h for iSCSI
	 */
	buf[0] = 0x05;
	/*
	 * From spc4r17 Section 7.5.4.6: TransportID for initiator
	 * ports using SCSI over iSCSI.
	 *
	 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
	 * shall contain the iSCSI name of an iSCSI initiator node (see
	 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
	 * null character terminates the ISCSI NAME field without regard for
	 * the specified length of the iSCSI TransportID or the contents of
	 * the ADDITIONAL LENGTH field.
	 */
	len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
	/*
	 * Add Extra byte for NULL terminator
	 */
	len++;
	/*
	 * If there is ISID present with the registration and *format code ==
	 * 1, use iSCSI Initiator port TransportID format.
	 *
	 * Otherwise use iSCSI Initiator device TransportID format that
	 * does not contain the ASCII encoded iSCSI Initiator iSID value
	 * provided by the iSCSI Initiator during the iSCSI login process.
	 */
	if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
		/*
		 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
		 * format.
		 */
		buf[0] |= 0x40;
		/*
		 * From spc4r17 Section 7.5.4.6: TransportID for initiator
		 * ports using SCSI over iSCSI.  Table 390
		 *
		 * The SEPARATOR field shall contain the five ASCII
		 * characters ",i,0x".
		 *
		 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
		 * field shall contain the iSCSI initiator session identifier
		 * (see RFC 3720) in the form of ASCII characters that are the
		 * hexadecimal digits converted from the binary iSCSI initiator
		 * session identifier value. The first ISCSI INITIATOR SESSION
		 * ID field byte containing an ASCII null character
		 */
		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
		buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
		buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
		buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
		len += 5;
		buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
		buf[off+len] = '\0'; off++;
		len += 7;
	}
	spin_unlock_irq(&se_nacl->nacl_sess_lock);
	/*
	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
	 * in the TransportID. The additional length shall be at least 20 and
	 * shall be a multiple of four.
	 */
	padding = ((-len) & 3);
	if (padding != 0)
		len += padding;

	/* Store ADDITIONAL LENGTH as a big-endian 16-bit value */
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff);
	/*
	 * Increment value for total payload + header length for
	 * full status descriptor
	 */
	len += 4;

	return len;
}
EXPORT_SYMBOL(iscsi_get_pr_transport_id);
317
318u32 iscsi_get_pr_transport_id_len(
319 struct se_portal_group *se_tpg,
320 struct se_node_acl *se_nacl,
321 struct t10_pr_registration *pr_reg,
322 int *format_code)
323{
324 u32 len = 0, padding = 0;
325
326 spin_lock_irq(&se_nacl->nacl_sess_lock);
327 len = strlen(se_nacl->initiatorname);
328 /*
329 * Add extra byte for NULL terminator
330 */
331 len++;
332 /*
333 * If there is ISID present with the registration, use format code:
334 * 01b: iSCSI Initiator port TransportID format
335 *
336 * If there is not an active iSCSI session, use format code:
337 * 00b: iSCSI Initiator device TransportID format
338 */
339 if (pr_reg->isid_present_at_reg) {
340 len += 5; /* For ",i,0x" ASCII seperator */
341 len += 7; /* For iSCSI Initiator Session ID + Null terminator */
342 *format_code = 1;
343 } else
344 *format_code = 0;
345 spin_unlock_irq(&se_nacl->nacl_sess_lock);
346 /*
347 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
348 * in the TransportID. The additional length shall be at least 20 and
349 * shall be a multiple of four.
350 */
351 padding = ((-len) & 3);
352 if (padding != 0)
353 len += padding;
354 /*
355 * Increment value for total payload + header length for
356 * full status descriptor
357 */
358 len += 4;
359
360 return len;
361}
362EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
363
364char *iscsi_parse_pr_out_transport_id(
365 struct se_portal_group *se_tpg,
366 const char *buf,
367 u32 *out_tid_len,
368 char **port_nexus_ptr)
369{
370 char *p;
371 u32 tid_len, padding;
372 int i;
373 u16 add_len;
374 u8 format_code = (buf[0] & 0xc0);
375 /*
376 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
377 *
378 * TransportID for initiator ports using SCSI over iSCSI,
379 * from Table 388 -- iSCSI TransportID formats.
380 *
381 * 00b Initiator port is identified using the world wide unique
382 * SCSI device name of the iSCSI initiator
383 * device containing the initiator port (see table 389).
384 * 01b Initiator port is identified using the world wide unique
385 * initiator port identifier (see table 390).10b to 11b
386 * Reserved
387 */
388 if ((format_code != 0x00) && (format_code != 0x40)) {
389 printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
390 " Initiator Transport ID\n", format_code);
391 return NULL;
392 }
393 /*
394 * If the caller wants the TransportID Length, we set that value for the
395 * entire iSCSI Tarnsport ID now.
396 */
397 if (out_tid_len != NULL) {
398 add_len = ((buf[2] >> 8) & 0xff);
399 add_len |= (buf[3] & 0xff);
400
401 tid_len = strlen((char *)&buf[4]);
402 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
403 tid_len += 1; /* Add one byte for NULL terminator */
404 padding = ((-tid_len) & 3);
405 if (padding != 0)
406 tid_len += padding;
407
408 if ((add_len + 4) != tid_len) {
409 printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
410 "does not match calculated tid_len: %u,"
411 " using tid_len instead\n", add_len+4, tid_len);
412 *out_tid_len = tid_len;
413 } else
414 *out_tid_len = (add_len + 4);
415 }
416 /*
417 * Check for ',i,0x' seperator between iSCSI Name and iSCSI Initiator
418 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
419 * format.
420 */
421 if (format_code == 0x40) {
422 p = strstr((char *)&buf[4], ",i,0x");
423 if (!(p)) {
424 printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
425 " for Initiator port identifier: %s\n",
426 (char *)&buf[4]);
427 return NULL;
428 }
429 *p = '\0'; /* Terminate iSCSI Name */
430 p += 5; /* Skip over ",i,0x" seperator */
431
432 *port_nexus_ptr = p;
433 /*
434 * Go ahead and do the lower case conversion of the received
435 * 12 ASCII characters representing the ISID in the TransportID
436 * for comparision against the running iSCSI session's ISID from
437 * iscsi_target.c:lio_sess_get_initiator_sid()
438 */
439 for (i = 0; i < 12; i++) {
440 if (isdigit(*p)) {
441 p++;
442 continue;
443 }
444 *p = tolower(*p);
445 p++;
446 }
447 }
448
449 return (char *)&buf[4];
450}
451EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
new file mode 100644
index 000000000000..0aaca885668f
--- /dev/null
+++ b/drivers/target/target_core_file.c
@@ -0,0 +1,688 @@
1/*******************************************************************************
2 * Filename: target_core_file.c
3 *
4 * This file contains the Storage Engine <-> FILEIO transport specific functions
5 *
6 * Copyright (c) 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/version.h>
30#include <linux/string.h>
31#include <linux/parser.h>
32#include <linux/timer.h>
33#include <linux/blkdev.h>
34#include <linux/slab.h>
35#include <linux/spinlock.h>
36#include <linux/smp_lock.h>
37#include <scsi/scsi.h>
38#include <scsi/scsi_host.h>
39
40#include <target/target_core_base.h>
41#include <target/target_core_device.h>
42#include <target/target_core_transport.h>
43
44#include "target_core_file.h"
45
/*
 * Compile-time debug switches: flip "#if 1" to "#if 0" to silence the
 * FILEIO cache / FUA debug printk()s.  Note they are enabled by default.
 */
#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif

#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif

/* Forward declaration; the template is defined later in this file */
static struct se_subsystem_api fileio_template;
59
60/* fd_attach_hba(): (Part of se_subsystem_api_t template)
61 *
62 *
63 */
64static int fd_attach_hba(struct se_hba *hba, u32 host_id)
65{
66 struct fd_host *fd_host;
67
68 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
69 if (!(fd_host)) {
70 printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
71 return -1;
72 }
73
74 fd_host->fd_host_id = host_id;
75
76 atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
77 atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
78 hba->hba_ptr = (void *) fd_host;
79
80 printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
81 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
82 TARGET_CORE_MOD_VERSION);
83 printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
84 " Target Core with TCQ Depth: %d MaxSectors: %u\n",
85 hba->hba_id, fd_host->fd_host_id,
86 atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
87
88 return 0;
89}
90
/* Free the per-HBA FILEIO host state allocated in fd_attach_hba(). */
static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}
101
102static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
103{
104 struct fd_dev *fd_dev;
105 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
106
107 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
108 if (!(fd_dev)) {
109 printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
110 return NULL;
111 }
112
113 fd_dev->fd_host = fd_host;
114
115 printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
116
117 return fd_dev;
118}
119
120/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
121 *
122 *
123 */
124static struct se_device *fd_create_virtdevice(
125 struct se_hba *hba,
126 struct se_subsystem_dev *se_dev,
127 void *p)
128{
129 char *dev_p = NULL;
130 struct se_device *dev;
131 struct se_dev_limits dev_limits;
132 struct queue_limits *limits;
133 struct fd_dev *fd_dev = (struct fd_dev *) p;
134 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
135 mm_segment_t old_fs;
136 struct file *file;
137 struct inode *inode = NULL;
138 int dev_flags = 0, flags;
139
140 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
141
142 old_fs = get_fs();
143 set_fs(get_ds());
144 dev_p = getname(fd_dev->fd_dev_name);
145 set_fs(old_fs);
146
147 if (IS_ERR(dev_p)) {
148 printk(KERN_ERR "getname(%s) failed: %lu\n",
149 fd_dev->fd_dev_name, IS_ERR(dev_p));
150 goto fail;
151 }
152#if 0
153 if (di->no_create_file)
154 flags = O_RDWR | O_LARGEFILE;
155 else
156 flags = O_RDWR | O_CREAT | O_LARGEFILE;
157#else
158 flags = O_RDWR | O_CREAT | O_LARGEFILE;
159#endif
160/* flags |= O_DIRECT; */
161 /*
162 * If fd_buffered_io=1 has not been set explictly (the default),
163 * use O_SYNC to force FILEIO writes to disk.
164 */
165 if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
166 flags |= O_SYNC;
167
168 file = filp_open(dev_p, flags, 0600);
169
170 if (IS_ERR(file) || !file || !file->f_dentry) {
171 printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
172 goto fail;
173 }
174 fd_dev->fd_file = file;
175 /*
176 * If using a block backend with this struct file, we extract
177 * fd_dev->fd_[block,dev]_size from struct block_device.
178 *
179 * Otherwise, we use the passed fd_size= from configfs
180 */
181 inode = file->f_mapping->host;
182 if (S_ISBLK(inode->i_mode)) {
183 struct request_queue *q;
184 /*
185 * Setup the local scope queue_limits from struct request_queue->limits
186 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
187 */
188 q = bdev_get_queue(inode->i_bdev);
189 limits = &dev_limits.limits;
190 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
191 limits->max_hw_sectors = queue_max_hw_sectors(q);
192 limits->max_sectors = queue_max_sectors(q);
193 /*
194 * Determine the number of bytes from i_size_read() minus
195 * one (1) logical sector from underlying struct block_device
196 */
197 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
198 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
199 fd_dev->fd_block_size);
200
201 printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
202 " block_device blocks: %llu logical_block_size: %d\n",
203 fd_dev->fd_dev_size,
204 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
205 fd_dev->fd_block_size);
206 } else {
207 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
208 printk(KERN_ERR "FILEIO: Missing fd_dev_size="
209 " parameter, and no backing struct"
210 " block_device\n");
211 goto fail;
212 }
213
214 limits = &dev_limits.limits;
215 limits->logical_block_size = FD_BLOCKSIZE;
216 limits->max_hw_sectors = FD_MAX_SECTORS;
217 limits->max_sectors = FD_MAX_SECTORS;
218 fd_dev->fd_block_size = FD_BLOCKSIZE;
219 }
220
221 dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
222 dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
223
224 dev = transport_add_device_to_core_hba(hba, &fileio_template,
225 se_dev, dev_flags, (void *)fd_dev,
226 &dev_limits, "FILEIO", FD_VERSION);
227 if (!(dev))
228 goto fail;
229
230 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
231 fd_dev->fd_queue_depth = dev->queue_depth;
232
233 printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
234 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
235 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
236
237 putname(dev_p);
238 return dev;
239fail:
240 if (fd_dev->fd_file) {
241 filp_close(fd_dev->fd_file, NULL);
242 fd_dev->fd_file = NULL;
243 }
244 putname(dev_p);
245 return NULL;
246}
247
248/* fd_free_device(): (Part of se_subsystem_api_t template)
249 *
250 *
251 */
252static void fd_free_device(void *p)
253{
254 struct fd_dev *fd_dev = (struct fd_dev *) p;
255
256 if (fd_dev->fd_file) {
257 filp_close(fd_dev->fd_file, NULL);
258 fd_dev->fd_file = NULL;
259 }
260
261 kfree(fd_dev);
262}
263
/* Translate a generic struct se_task back into its enclosing fd_request. */
static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}
268
269
270static struct se_task *
271fd_alloc_task(struct se_cmd *cmd)
272{
273 struct fd_request *fd_req;
274
275 fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
276 if (!(fd_req)) {
277 printk(KERN_ERR "Unable to allocate struct fd_request\n");
278 return NULL;
279 }
280
281 fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
282
283 return &fd_req->fd_task;
284}
285
286static int fd_do_readv(struct se_task *task)
287{
288 struct fd_request *req = FILE_REQ(task);
289 struct file *fd = req->fd_dev->fd_file;
290 struct scatterlist *sg = task->task_sg;
291 struct iovec *iov;
292 mm_segment_t old_fs;
293 loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
294 int ret = 0, i;
295
296 iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
297 if (!(iov)) {
298 printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
299 return -1;
300 }
301
302 for (i = 0; i < task->task_sg_num; i++) {
303 iov[i].iov_len = sg[i].length;
304 iov[i].iov_base = sg_virt(&sg[i]);
305 }
306
307 old_fs = get_fs();
308 set_fs(get_ds());
309 ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
310 set_fs(old_fs);
311
312 kfree(iov);
313 /*
314 * Return zeros and GOOD status even if the READ did not return
315 * the expected virt_size for struct file w/o a backing struct
316 * block_device.
317 */
318 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
319 if (ret < 0 || ret != task->task_size) {
320 printk(KERN_ERR "vfs_readv() returned %d,"
321 " expecting %d for S_ISBLK\n", ret,
322 (int)task->task_size);
323 return -1;
324 }
325 } else {
326 if (ret < 0) {
327 printk(KERN_ERR "vfs_readv() returned %d for non"
328 " S_ISBLK\n", ret);
329 return -1;
330 }
331 }
332
333 return 1;
334}
335
336static int fd_do_writev(struct se_task *task)
337{
338 struct fd_request *req = FILE_REQ(task);
339 struct file *fd = req->fd_dev->fd_file;
340 struct scatterlist *sg = task->task_sg;
341 struct iovec *iov;
342 mm_segment_t old_fs;
343 loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
344 int ret, i = 0;
345
346 iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
347 if (!(iov)) {
348 printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
349 return -1;
350 }
351
352 for (i = 0; i < task->task_sg_num; i++) {
353 iov[i].iov_len = sg[i].length;
354 iov[i].iov_base = sg_virt(&sg[i]);
355 }
356
357 old_fs = get_fs();
358 set_fs(get_ds());
359 ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
360 set_fs(old_fs);
361
362 kfree(iov);
363
364 if (ret < 0 || ret != task->task_size) {
365 printk(KERN_ERR "vfs_writev() returned %d\n", ret);
366 return -1;
367 }
368
369 return 1;
370}
371
372static void fd_emulate_sync_cache(struct se_task *task)
373{
374 struct se_cmd *cmd = TASK_CMD(task);
375 struct se_device *dev = cmd->se_dev;
376 struct fd_dev *fd_dev = dev->dev_ptr;
377 int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
378 loff_t start, end;
379 int ret;
380
381 /*
382 * If the Immediate bit is set, queue up the GOOD response
383 * for this SYNCHRONIZE_CACHE op
384 */
385 if (immed)
386 transport_complete_sync_cache(cmd, 1);
387
388 /*
389 * Determine if we will be flushing the entire device.
390 */
391 if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
392 start = 0;
393 end = LLONG_MAX;
394 } else {
395 start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
396 if (cmd->data_length)
397 end = start + cmd->data_length;
398 else
399 end = LLONG_MAX;
400 }
401
402 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
403 if (ret != 0)
404 printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
405
406 if (!immed)
407 transport_complete_sync_cache(cmd, ret == 0);
408}
409
410/*
411 * Tell TCM Core that we are capable of WriteCache emulation for
412 * an underlying struct se_device.
413 */
static int fd_emulated_write_cache(struct se_device *dev)
{
	/* WriteCache emulation is always available for FILEIO devices. */
	return 1;
}
418
static int fd_emulated_dpo(struct se_device *dev)
{
	/* Disable Page Out (DPO) is not emulated by FILEIO. */
	return 0;
}
/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK; see fd_emulate_write_fua() for the implementation.
 */
static int fd_emulated_fua_write(struct se_device *dev)
{
	return 1;
}
431
static int fd_emulated_fua_read(struct se_device *dev)
{
	/* FUA for READs is not emulated by FILEIO. */
	return 0;
}
436
437/*
438 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
439 * LBA range basis..
440 */
441static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
442{
443 struct se_device *dev = cmd->se_dev;
444 struct fd_dev *fd_dev = dev->dev_ptr;
445 loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
446 loff_t end = start + task->task_size;
447 int ret;
448
449 DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
450 task->task_lba, task->task_size);
451
452 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
453 if (ret != 0)
454 printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
455}
456
457static int fd_do_task(struct se_task *task)
458{
459 struct se_cmd *cmd = task->task_se_cmd;
460 struct se_device *dev = cmd->se_dev;
461 int ret = 0;
462
463 /*
464 * Call vectorized fileio functions to map struct scatterlist
465 * physical memory addresses to struct iovec virtual memory.
466 */
467 if (task->task_data_direction == DMA_FROM_DEVICE) {
468 ret = fd_do_readv(task);
469 } else {
470 ret = fd_do_writev(task);
471
472 if (ret > 0 &&
473 DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
474 DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
475 T_TASK(cmd)->t_tasks_fua) {
476 /*
477 * We might need to be a bit smarter here
478 * and return some sense data to let the initiator
479 * know the FUA WRITE cache sync failed..?
480 */
481 fd_emulate_write_fua(cmd, task);
482 }
483
484 }
485
486 if (ret < 0)
487 return ret;
488 if (ret) {
489 task->task_scsi_status = GOOD;
490 transport_complete_task(task, 1);
491 }
492 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
493}
494
/* fd_free_task(): (Part of se_subsystem_api_t template)
 *
 * Release the fd_request allocated in fd_alloc_task().
 */
static void fd_free_task(struct se_task *task)
{
	kfree(FILE_REQ(task));
}
505
/* configfs option tokens parsed by fd_set_configfs_dev_params() */
enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};
509
510static match_table_t tokens = {
511 {Opt_fd_dev_name, "fd_dev_name=%s"},
512 {Opt_fd_dev_size, "fd_dev_size=%s"},
513 {Opt_fd_buffered_io, "fd_buffered_id=%d"},
514 {Opt_err, NULL}
515};
516
517static ssize_t fd_set_configfs_dev_params(
518 struct se_hba *hba,
519 struct se_subsystem_dev *se_dev,
520 const char *page, ssize_t count)
521{
522 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
523 char *orig, *ptr, *arg_p, *opts;
524 substring_t args[MAX_OPT_ARGS];
525 int ret = 0, arg, token;
526
527 opts = kstrdup(page, GFP_KERNEL);
528 if (!opts)
529 return -ENOMEM;
530
531 orig = opts;
532
533 while ((ptr = strsep(&opts, ",")) != NULL) {
534 if (!*ptr)
535 continue;
536
537 token = match_token(ptr, tokens, args);
538 switch (token) {
539 case Opt_fd_dev_name:
540 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
541 "%s", match_strdup(&args[0]));
542 printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
543 fd_dev->fd_dev_name);
544 fd_dev->fbd_flags |= FBDF_HAS_PATH;
545 break;
546 case Opt_fd_dev_size:
547 arg_p = match_strdup(&args[0]);
548 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
549 if (ret < 0) {
550 printk(KERN_ERR "strict_strtoull() failed for"
551 " fd_dev_size=\n");
552 goto out;
553 }
554 printk(KERN_INFO "FILEIO: Referencing Size: %llu"
555 " bytes\n", fd_dev->fd_dev_size);
556 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
557 break;
558 case Opt_fd_buffered_io:
559 match_int(args, &arg);
560 if (arg != 1) {
561 printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
562 ret = -EINVAL;
563 goto out;
564 }
565
566 printk(KERN_INFO "FILEIO: Using buffered I/O"
567 " operations for struct fd_dev\n");
568
569 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
570 break;
571 default:
572 break;
573 }
574 }
575
576out:
577 kfree(orig);
578 return (!ret) ? count : ret;
579}
580
581static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
582{
583 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
584
585 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
586 printk(KERN_ERR "Missing fd_dev_name=\n");
587 return -1;
588 }
589
590 return 0;
591}
592
593static ssize_t fd_show_configfs_dev_params(
594 struct se_hba *hba,
595 struct se_subsystem_dev *se_dev,
596 char *b)
597{
598 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
599 ssize_t bl = 0;
600
601 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
602 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
603 fd_dev->fd_dev_name, fd_dev->fd_dev_size,
604 (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
605 "Buffered" : "Synchronous");
606 return bl;
607}
608
/* fd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 * Return the per-request copy of the SCSI CDB.
 */
static unsigned char *fd_get_cdb(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	return req->fd_scsi_cdb;
}
619
/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 * NOTE(review): the original comment claims the SCSI_SPC_2 value is
 * presented as SPC-3 in INQUIRY data, presumably via an off-by-one
 * encoding in the transport layer — confirm against the core INQUIRY
 * emulation before relying on it.
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
628
/* fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 * FILEIO devices are always presented as direct-access disks.
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
637
638static sector_t fd_get_blocks(struct se_device *dev)
639{
640 struct fd_dev *fd_dev = dev->dev_ptr;
641 unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
642 DEV_ATTRIB(dev)->block_size);
643
644 return blocks_long;
645}
646
/*
 * se_subsystem_api backend template wiring the FILEIO callbacks above into
 * the TCM core; registered from fileio_module_init().
 */
static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.dpo_emulated		= fd_emulated_dpo,
	.fua_write_emulated	= fd_emulated_fua_write,
	.fua_read_emulated	= fd_emulated_fua_read,
	.write_cache_emulated	= fd_emulated_write_cache,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_cdb		= fd_get_cdb,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};
672
/* Register the FILEIO backend with the TCM core at module load time. */
static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}
677
/* Unhook the FILEIO backend from the TCM core on module unload. */
static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}
682
MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

/* Module entry/exit hooks. */
module_init(fileio_module_init);
module_exit(fileio_module_exit);
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
new file mode 100644
index 000000000000..ef4de2b4bd46
--- /dev/null
+++ b/drivers/target/target_core_file.h
@@ -0,0 +1,50 @@
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H

#define FD_VERSION		"4.0"

#define FD_MAX_DEV_NAME		256
/* Maximum queuedepth for the FILEIO HBA */
#define FD_HBA_QUEUE_DEPTH	256
#define FD_DEVICE_QUEUE_DEPTH	32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
/* Defaults used when no backing struct block_device supplies real limits */
#define FD_BLOCKSIZE		512
#define FD_MAX_SECTORS		1024

#define RRF_EMULATE_CDB		0x01
#define RRF_GOT_LBA		0x02
16
/* Per-se_task request state for the FILEIO backend. */
struct fd_request {
	struct se_task	fd_task;
	/* SCSI CDB from iSCSI Command PDU */
	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
	/* FILEIO device */
	struct fd_dev	*fd_dev;
} ____cacheline_aligned;
24
/* fd_dev->fbd_flags values */
#define FBDF_HAS_PATH		0x01
#define FBDF_HAS_SIZE		0x02
/* NOTE(review): prefix reads FDBD_ while the others use FBDF_ — looks like
 * a typo, but the name is referenced as-is elsewhere, so it is kept. */
#define FDBD_USE_BUFFERED_IO	0x04
28
/* Per-device state for a FILEIO backstore. */
struct fd_dev {
	u32		fbd_flags;
	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
	/* Unique FILEIO Device ID in FILEIO HBA (was mislabelled "Ramdisk") */
	u32		fd_dev_id;
	/* Number of SG tables in sg_table_array */
	u32		fd_table_count;
	u32		fd_queue_depth;
	u32		fd_block_size;
	unsigned long long fd_dev_size;
	struct file	*fd_file;
	/* FILEIO HBA device is connected to */
	struct fd_host *fd_host;
} ____cacheline_aligned;
43
/* Per-HBA state for the FILEIO backend. */
struct fd_host {
	/* Counter used to hand out fd_dev_id values */
	u32		fd_host_dev_id_count;
	/* Unique FILEIO Host ID */
	u32		fd_host_id;
} ____cacheline_aligned;
49
50#endif /* TARGET_CORE_FILE_H */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
new file mode 100644
index 000000000000..4bbe8208b241
--- /dev/null
+++ b/drivers/target/target_core_hba.c
@@ -0,0 +1,185 @@
1/*******************************************************************************
2 * Filename: target_core_hba.c
3 *
 * This file contains the iSCSI HBA Transport related functions.
5 *
6 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/net.h>
30#include <linux/string.h>
31#include <linux/timer.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/smp_lock.h>
35#include <linux/in.h>
36#include <net/sock.h>
37#include <net/tcp.h>
38
39#include <target/target_core_base.h>
40#include <target/target_core_device.h>
41#include <target/target_core_device.h>
42#include <target/target_core_tpg.h>
43#include <target/target_core_transport.h>
44
45#include "target_core_hba.h"
46
/* Registry of backend subsystem plugins, protected by subsystem_mutex. */
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
49
50int transport_subsystem_register(struct se_subsystem_api *sub_api)
51{
52 struct se_subsystem_api *s;
53
54 INIT_LIST_HEAD(&sub_api->sub_api_list);
55
56 mutex_lock(&subsystem_mutex);
57 list_for_each_entry(s, &subsystem_list, sub_api_list) {
58 if (!(strcmp(s->name, sub_api->name))) {
59 printk(KERN_ERR "%p is already registered with"
60 " duplicate name %s, unable to process"
61 " request\n", s, s->name);
62 mutex_unlock(&subsystem_mutex);
63 return -EEXIST;
64 }
65 }
66 list_add_tail(&sub_api->sub_api_list, &subsystem_list);
67 mutex_unlock(&subsystem_mutex);
68
69 printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
70 " %p\n", sub_api->name, sub_api->owner);
71 return 0;
72}
73EXPORT_SYMBOL(transport_subsystem_register);
74
/* Remove a backend plugin from the global subsystem registry. */
void transport_subsystem_release(struct se_subsystem_api *sub_api)
{
	mutex_lock(&subsystem_mutex);
	list_del(&sub_api->sub_api_list);
	mutex_unlock(&subsystem_mutex);
}
EXPORT_SYMBOL(transport_subsystem_release);
82
83static struct se_subsystem_api *core_get_backend(const char *sub_name)
84{
85 struct se_subsystem_api *s;
86
87 mutex_lock(&subsystem_mutex);
88 list_for_each_entry(s, &subsystem_list, sub_api_list) {
89 if (!strcmp(s->name, sub_name))
90 goto found;
91 }
92 mutex_unlock(&subsystem_mutex);
93 return NULL;
94found:
95 if (s->owner && !try_module_get(s->owner))
96 s = NULL;
97 mutex_unlock(&subsystem_mutex);
98 return s;
99}
100
/*
 * Allocate a struct se_hba, bind it to the backend plugin named by
 * plugin_name (taking a module reference via core_get_backend()), invoke
 * the plugin's attach_hba() and publish the HBA on the global list.
 * Returns ERR_PTR() on failure.
 */
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		printk(KERN_ERR "Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&hba->hba_dev_list);
	spin_lock_init(&hba->device_lock);
	spin_lock_init(&hba->hba_queue_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;

	atomic_set(&hba->max_queue_depth, 0);
	atomic_set(&hba->left_queue_depth, 0);

	/* Takes a reference on the backend's owning module on success. */
	hba->transport = core_get_backend(plugin_name);
	if (!hba->transport) {
		ret = -EINVAL;
		goto out_free_hba;
	}

	ret = hba->transport->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;

	/* Publish on the global list only after attach_hba() succeeded. */
	spin_lock(&se_global->hba_lock);
	hba->hba_id = se_global->g_hba_id_counter++;
	list_add_tail(&hba->hba_list, &se_global->g_hba_list);
	spin_unlock(&se_global->hba_lock);

	printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
			" Core\n", hba->hba_id);

	return hba;

out_module_put:
	/* Drop the module reference taken by core_get_backend(). */
	if (hba->transport->owner)
		module_put(hba->transport->owner);
	hba->transport = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}
152
/*
 * Detach and free an HBA: release every device still attached, call the
 * backend's detach_hba(), unlink the HBA from the global list and drop
 * the backend module reference taken in core_alloc_hba().
 */
int
core_delete_hba(struct se_hba *hba)
{
	struct se_device *dev, *dev_tmp;

	spin_lock(&hba->device_lock);
	list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {

		se_clear_dev_ports(dev);
		/*
		 * device_lock is dropped around se_release_device_for_hba()
		 * (presumably because it can sleep — confirm); the _safe
		 * iterator keeps the cursor valid across the unlock window.
		 */
		spin_unlock(&hba->device_lock);

		se_release_device_for_hba(dev);

		spin_lock(&hba->device_lock);
	}
	spin_unlock(&hba->device_lock);

	hba->transport->detach_hba(hba);

	spin_lock(&se_global->hba_lock);
	list_del(&hba->hba_list);
	spin_unlock(&se_global->hba_lock);

	printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
			" Core\n", hba->hba_id);

	if (hba->transport->owner)
		module_put(hba->transport->owner);

	hba->transport = NULL;
	kfree(hba);
	return 0;
}
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
new file mode 100644
index 000000000000..bb0fea5f730c
--- /dev/null
+++ b/drivers/target/target_core_hba.h
@@ -0,0 +1,7 @@
#ifndef TARGET_CORE_HBA_H
#define TARGET_CORE_HBA_H

/* Allocate an HBA bound to the named backend plugin; ERR_PTR() on failure. */
extern struct se_hba *core_alloc_hba(const char *, u32, u32);
/* Release all devices on the HBA, detach the backend and free the HBA. */
extern int core_delete_hba(struct se_hba *);

#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
new file mode 100644
index 000000000000..c6e0d757e76e
--- /dev/null
+++ b/drivers/target/target_core_iblock.c
@@ -0,0 +1,808 @@
1/*******************************************************************************
2 * Filename: target_core_iblock.c
3 *
4 * This file contains the Storage Engine <-> Linux BlockIO transport
5 * specific functions.
6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 *
12 * Nicholas A. Bellinger <nab@kernel.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 *
28 ******************************************************************************/
29
30#include <linux/version.h>
31#include <linux/string.h>
32#include <linux/parser.h>
33#include <linux/timer.h>
34#include <linux/fs.h>
35#include <linux/blkdev.h>
36#include <linux/slab.h>
37#include <linux/spinlock.h>
38#include <linux/smp_lock.h>
39#include <linux/bio.h>
40#include <linux/genhd.h>
41#include <linux/file.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_host.h>
44
45#include <target/target_core_base.h>
46#include <target/target_core_device.h>
47#include <target/target_core_transport.h>
48
49#include "target_core_iblock.h"
50
/* Flip "#if 0" to "#if 1" to enable verbose IBLOCK debug logging. */
#if 0
#define DEBUG_IBLOCK(x...) printk(x)
#else
#define DEBUG_IBLOCK(x...)
#endif
56
57static struct se_subsystem_api iblock_template;
58
59static void iblock_bio_done(struct bio *, int);
60
61/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
62 *
63 *
64 */
65static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
66{
67 struct iblock_hba *ib_host;
68
69 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
70 if (!(ib_host)) {
71 printk(KERN_ERR "Unable to allocate memory for"
72 " struct iblock_hba\n");
73 return -ENOMEM;
74 }
75
76 ib_host->iblock_host_id = host_id;
77
78 atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
79 atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
80 hba->hba_ptr = (void *) ib_host;
81
82 printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
83 " Generic Target Core Stack %s\n", hba->hba_id,
84 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
85
86 printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
87 " Target Core TCQ Depth: %d\n", hba->hba_id,
88 ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
89
90 return 0;
91}
92
93static void iblock_detach_hba(struct se_hba *hba)
94{
95 struct iblock_hba *ib_host = hba->hba_ptr;
96
97 printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
98 " Target Core\n", hba->hba_id, ib_host->iblock_host_id);
99
100 kfree(ib_host);
101 hba->hba_ptr = NULL;
102}
103
104static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
105{
106 struct iblock_dev *ib_dev = NULL;
107 struct iblock_hba *ib_host = hba->hba_ptr;
108
109 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
110 if (!(ib_dev)) {
111 printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
112 return NULL;
113 }
114 ib_dev->ibd_host = ib_host;
115
116 printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
117
118 return ib_dev;
119}
120
121static struct se_device *iblock_create_virtdevice(
122 struct se_hba *hba,
123 struct se_subsystem_dev *se_dev,
124 void *p)
125{
126 struct iblock_dev *ib_dev = p;
127 struct se_device *dev;
128 struct se_dev_limits dev_limits;
129 struct block_device *bd = NULL;
130 struct request_queue *q;
131 struct queue_limits *limits;
132 u32 dev_flags = 0;
133
134 if (!(ib_dev)) {
135 printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
136 return 0;
137 }
138 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
139 /*
140 * These settings need to be made tunable..
141 */
142 ib_dev->ibd_bio_set = bioset_create(32, 64);
143 if (!(ib_dev->ibd_bio_set)) {
144 printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
145 return 0;
146 }
147 printk(KERN_INFO "IBLOCK: Created bio_set()\n");
148 /*
149 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
150 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
151 */
152 printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
153 ib_dev->ibd_udev_path);
154
155 bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
156 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
157 if (!(bd))
158 goto failed;
159 /*
160 * Setup the local scope queue_limits from struct request_queue->limits
161 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
162 */
163 q = bdev_get_queue(bd);
164 limits = &dev_limits.limits;
165 limits->logical_block_size = bdev_logical_block_size(bd);
166 limits->max_hw_sectors = queue_max_hw_sectors(q);
167 limits->max_sectors = queue_max_sectors(q);
168 dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
169 dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
170
171 ib_dev->ibd_major = MAJOR(bd->bd_dev);
172 ib_dev->ibd_minor = MINOR(bd->bd_dev);
173 ib_dev->ibd_bd = bd;
174
175 dev = transport_add_device_to_core_hba(hba,
176 &iblock_template, se_dev, dev_flags, (void *)ib_dev,
177 &dev_limits, "IBLOCK", IBLOCK_VERSION);
178 if (!(dev))
179 goto failed;
180
181 ib_dev->ibd_depth = dev->queue_depth;
182
183 /*
184 * Check if the underlying struct block_device request_queue supports
185 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
186 * in ATA and we need to set TPE=1
187 */
188 if (blk_queue_discard(bdev_get_queue(bd))) {
189 struct request_queue *q = bdev_get_queue(bd);
190
191 DEV_ATTRIB(dev)->max_unmap_lba_count =
192 q->limits.max_discard_sectors;
193 /*
194 * Currently hardcoded to 1 in Linux/SCSI code..
195 */
196 DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
197 DEV_ATTRIB(dev)->unmap_granularity =
198 q->limits.discard_granularity;
199 DEV_ATTRIB(dev)->unmap_granularity_alignment =
200 q->limits.discard_alignment;
201
202 printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
203 " disabled by default\n");
204 }
205
206 return dev;
207
208failed:
209 if (ib_dev->ibd_bio_set) {
210 bioset_free(ib_dev->ibd_bio_set);
211 ib_dev->ibd_bio_set = NULL;
212 }
213 ib_dev->ibd_bd = NULL;
214 ib_dev->ibd_major = 0;
215 ib_dev->ibd_minor = 0;
216 return NULL;
217}
218
/* Release the claimed block_device, the bio_set and the iblock_dev itself. */
static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}
227
/* Translate a generic struct se_task back into its enclosing iblock_req. */
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
	return container_of(task, struct iblock_req, ib_task);
}
232
233static struct se_task *
234iblock_alloc_task(struct se_cmd *cmd)
235{
236 struct iblock_req *ib_req;
237
238 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
239 if (!(ib_req)) {
240 printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
241 return NULL;
242 }
243
244 ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
245 atomic_set(&ib_req->ib_bio_cnt, 0);
246 return &ib_req->ib_task;
247}
248
249static unsigned long long iblock_emulate_read_cap_with_block_size(
250 struct se_device *dev,
251 struct block_device *bd,
252 struct request_queue *q)
253{
254 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
255 bdev_logical_block_size(bd)) - 1);
256 u32 block_size = bdev_logical_block_size(bd);
257
258 if (block_size == DEV_ATTRIB(dev)->block_size)
259 return blocks_long;
260
261 switch (block_size) {
262 case 4096:
263 switch (DEV_ATTRIB(dev)->block_size) {
264 case 2048:
265 blocks_long <<= 1;
266 break;
267 case 1024:
268 blocks_long <<= 2;
269 break;
270 case 512:
271 blocks_long <<= 3;
272 default:
273 break;
274 }
275 break;
276 case 2048:
277 switch (DEV_ATTRIB(dev)->block_size) {
278 case 4096:
279 blocks_long >>= 1;
280 break;
281 case 1024:
282 blocks_long <<= 1;
283 break;
284 case 512:
285 blocks_long <<= 2;
286 break;
287 default:
288 break;
289 }
290 break;
291 case 1024:
292 switch (DEV_ATTRIB(dev)->block_size) {
293 case 4096:
294 blocks_long >>= 2;
295 break;
296 case 2048:
297 blocks_long >>= 1;
298 break;
299 case 512:
300 blocks_long <<= 1;
301 break;
302 default:
303 break;
304 }
305 break;
306 case 512:
307 switch (DEV_ATTRIB(dev)->block_size) {
308 case 4096:
309 blocks_long >>= 3;
310 break;
311 case 2048:
312 blocks_long >>= 2;
313 break;
314 case 1024:
315 blocks_long >>= 1;
316 break;
317 default:
318 break;
319 }
320 break;
321 default:
322 break;
323 }
324
325 return blocks_long;
326}
327
328/*
329 * Emulate SYCHRONIZE_CACHE_*
330 */
331static void iblock_emulate_sync_cache(struct se_task *task)
332{
333 struct se_cmd *cmd = TASK_CMD(task);
334 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
335 int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
336 sector_t error_sector;
337 int ret;
338
339 /*
340 * If the Immediate bit is set, queue up the GOOD response
341 * for this SYNCHRONIZE_CACHE op
342 */
343 if (immed)
344 transport_complete_sync_cache(cmd, 1);
345
346 /*
347 * blkdev_issue_flush() does not support a specifying a range, so
348 * we have to flush the entire cache.
349 */
350 ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
351 if (ret != 0) {
352 printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
353 " error_sector: %llu\n", ret,
354 (unsigned long long)error_sector);
355 }
356
357 if (!immed)
358 transport_complete_sync_cache(cmd, ret == 0);
359}
360
/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int iblock_emulated_write_cache(struct se_device *dev)
{
	return 1;
}
369
static int iblock_emulated_dpo(struct se_device *dev)
{
	/* Disable Page Out (DPO) is not emulated by IBLOCK. */
	return 0;
}
374
/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK; see the WRITE_FUA selection in iblock_do_task().
 */
static int iblock_emulated_fua_write(struct se_device *dev)
{
	return 1;
}
383
static int iblock_emulated_fua_read(struct se_device *dev)
{
	/* FUA for READs is not emulated by IBLOCK. */
	return 0;
}
388
/*
 * Submit the bio chain previously built for this task (see
 * iblock_map_task_SG()) to the underlying block device.
 *
 * Returns PYX_TRANSPORT_SENT_TO_TRANSPORT; completion is reported
 * asynchronously through iblock_bio_done().
 */
static int iblock_do_task(struct se_task *task)
{
	struct se_device *dev = task->task_se_cmd->se_dev;
	struct iblock_req *req = IBLOCK_REQ(task);
	struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
	struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
	struct bio *bio = req->ib_bio, *nbio = NULL;
	int rw;

	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
		    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
		     T_TASK(task->task_se_cmd)->t_tasks_fua))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/* Walk the singly linked chain, detaching each bio before submit. */
	while (bio) {
		nbio = bio->bi_next;
		bio->bi_next = NULL;
		DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
			" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);

		submit_bio(rw, bio);
		bio = nbio;
	}

	/* Unplug the queue so the just-submitted bios get dispatched. */
	if (q->unplug_fn)
		q->unplug_fn(q);
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
427
428static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
429{
430 struct iblock_dev *ibd = dev->dev_ptr;
431 struct block_device *bd = ibd->ibd_bd;
432 int barrier = 0;
433
434 return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
435}
436
437static void iblock_free_task(struct se_task *task)
438{
439 struct iblock_req *req = IBLOCK_REQ(task);
440 struct bio *bio, *hbio = req->ib_bio;
441 /*
442 * We only release the bio(s) here if iblock_bio_done() has not called
443 * bio_put() -> iblock_bio_destructor().
444 */
445 while (hbio != NULL) {
446 bio = hbio;
447 hbio = hbio->bi_next;
448 bio->bi_next = NULL;
449 bio_put(bio);
450 }
451
452 kfree(req);
453}
454
/* Tokens accepted by iblock_set_configfs_dev_params() */
enum {
	Opt_udev_path, Opt_force, Opt_err
};

/* match_token() table mapping configfs option strings to tokens above */
static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
464
465static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
466 struct se_subsystem_dev *se_dev,
467 const char *page, ssize_t count)
468{
469 struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
470 char *orig, *ptr, *opts;
471 substring_t args[MAX_OPT_ARGS];
472 int ret = 0, arg, token;
473
474 opts = kstrdup(page, GFP_KERNEL);
475 if (!opts)
476 return -ENOMEM;
477
478 orig = opts;
479
480 while ((ptr = strsep(&opts, ",")) != NULL) {
481 if (!*ptr)
482 continue;
483
484 token = match_token(ptr, tokens, args);
485 switch (token) {
486 case Opt_udev_path:
487 if (ib_dev->ibd_bd) {
488 printk(KERN_ERR "Unable to set udev_path= while"
489 " ib_dev->ibd_bd exists\n");
490 ret = -EEXIST;
491 goto out;
492 }
493
494 ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
495 "%s", match_strdup(&args[0]));
496 printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
497 ib_dev->ibd_udev_path);
498 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
499 break;
500 case Opt_force:
501 match_int(args, &arg);
502 ib_dev->ibd_force = arg;
503 printk(KERN_INFO "IBLOCK: Set force=%d\n",
504 ib_dev->ibd_force);
505 break;
506 default:
507 break;
508 }
509 }
510
511out:
512 kfree(orig);
513 return (!ret) ? count : ret;
514}
515
516static ssize_t iblock_check_configfs_dev_params(
517 struct se_hba *hba,
518 struct se_subsystem_dev *se_dev)
519{
520 struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
521
522 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
523 printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
524 return -1;
525 }
526
527 return 0;
528}
529
/*
 * Format the current IBLOCK device parameters (backing bdev name,
 * udev path, major/minor, claim status) into buffer @b for configfs.
 *
 * Returns the number of bytes written.
 */
static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, " UDEV PATH: %s\n",
			ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");

	bl += sprintf(b + bl, "        ");
	if (bd) {
		/*
		 * bd_holder == ibd means we claimed the bdev ourselves;
		 * any other non-NULL holder means some other OS user owns it.
		 */
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: %d Minor: %d\n",
			ibd->ibd_major, ibd->ibd_minor);
	}

	return bl;
}
562
/*
 * bio destructor: return the bio to the per-device bio_set it was
 * allocated from in iblock_get_bio().
 */
static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}
570
/*
 * Allocate a bio with room for @sg_num vectors from the device bio_set,
 * wire its completion/destructor callbacks to this task, set its start
 * sector, and bump the task's in-flight bio count.
 *
 * Returns the bio, or NULL with *ret set to an OOM transport status.
 */
static struct bio *iblock_get_bio(
	struct se_task *task,
	struct iblock_req *ib_req,
	struct iblock_dev *ib_dev,
	int *ret,
	sector_t lba,
	u32 sg_num)
{
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!(bio)) {
		printk(KERN_ERR "Unable to allocate memory for bio\n");
		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
		return NULL;
	}

	DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = (void *) task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	/* Counted down in iblock_bio_done(); last bio completes the task. */
	atomic_inc(&ib_req->ib_bio_cnt);

	DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
	DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
			atomic_read(&ib_req->ib_bio_cnt));
	return bio;
}
604
/*
 * Build the chain of bios for a task by mapping its scatterlist pages.
 * When a bio fills up (bio_add_page() returns a short count), a new bio
 * is chained on and the same sg entry is retried.
 *
 * Returns 0 on success or a PYX_TRANSPORT_* error; on failure the whole
 * partially built chain is released.
 */
static int iblock_map_task_SG(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = SE_DEV(cmd);
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
	struct scatterlist *sg;
	int ret = 0;
	u32 i, sg_num = task->task_sg_num;
	sector_t block_lba;
	/*
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
	if (DEV_ATTRIB(dev)->block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (DEV_ATTRIB(dev)->block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (DEV_ATTRIB(dev)->block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (DEV_ATTRIB(dev)->block_size == 512)
		block_lba = task->task_lba;
	else {
		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", DEV_ATTRIB(dev)->block_size);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
	if (!(bio))
		return ret;

	/* hbio = head of chain, tbio = current tail for appending */
	ib_req->ib_bio = bio;
	hbio = tbio = bio;
	/*
	 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
	 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
	 */
	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
				sg->length, sg->offset);
again:
		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
		if (ret != sg->length) {
			/* Current bio is full: chain a new one, retry this sg */

			DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
					bio->bi_sector);
			DEBUG_IBLOCK("** task->task_size: %u\n",
					task->task_size);
			DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
					bio->bi_max_vecs);
			DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
					bio->bi_vcnt);

			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
						block_lba, sg_num);
			if (!(bio))
				goto fail;

			tbio = tbio->bi_next = bio;
			DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
				" list, Going to again\n", bio);
			goto again;
		}
		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
		DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
			" sg_num to %u\n", task, sg_num);
		DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
				" to %llu\n", task, block_lba);
		DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
				" %u\n", task, bio->bi_vcnt);
	}

	return 0;
fail:
	/* Tear down the partially built chain. */
	while (hbio) {
		bio = hbio;
		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio_put(bio);
	}
	return ret;
}
692
/* Return the per-task copy of the SCSI CDB. */
static unsigned char *iblock_get_cdb(struct se_task *task)
{
	return IBLOCK_REQ(task)->ib_scsi_cdb;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/* IBLOCK backstores always present themselves as TYPE_DISK. */
static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

/* Report device capacity via READ_CAPACITY emulation on the backing bdev. */
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
716
/*
 * bio completion callback.  Each completing bio drops ib_bio_cnt; the
 * final bio completes the task with GOOD status iff no bio in the chain
 * recorded an error (ib_bio_err_cnt == 0).
 */
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);
	/*
	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
	 */
	if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
		err = -EIO;

	if (err != 0) {
		printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
		bio_put(bio);
		/*
		 * Wait to complete the task until the last bio as completed.
		 */
		if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
			return;

		/* Last bio and at least one error: fail the task. */
		ibr->ib_bio = NULL;
		transport_complete_task(task, 0);
		return;
	}
	DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		task, bio, task->task_lba, bio->bi_sector, err);
	/*
	 * bio_put() will call iblock_bio_destructor() to release the bio back
	 * to ibr->ib_bio_set.
	 */
	bio_put(bio);
	/*
	 * Wait to complete the task until the last bio as completed.
	 */
	if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
		return;
	/*
	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
	 */
	ibr->ib_bio = NULL;
	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
}
764
/* TCM subsystem backend ops for the IBLOCK (raw block device) backstore. */
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.map_task_SG		= iblock_map_task_SG,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.dpo_emulated		= iblock_emulated_dpo,
	.fua_write_emulated	= iblock_emulated_fua_write,
	.fua_read_emulated	= iblock_emulated_fua_read,
	.write_cache_emulated	= iblock_emulated_write_cache,
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.free_task		= iblock_free_task,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_cdb		= iblock_get_cdb,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};
792
/* Register the IBLOCK backend with TCM core on module load. */
static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

/* Unregister the IBLOCK backend on module unload. */
static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
new file mode 100644
index 000000000000..64c1f4d69f76
--- /dev/null
+++ b/drivers/target/target_core_iblock.h
@@ -0,0 +1,40 @@
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H

#define IBLOCK_VERSION "4.0"

#define IBLOCK_HBA_QUEUE_DEPTH 512
#define IBLOCK_DEVICE_QUEUE_DEPTH 32
#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
#define IBLOCK_MAX_CDBS 16
/* Linux block layer sectors are 512 bytes: shift for bytes <-> sectors */
#define IBLOCK_LBA_SHIFT 9

/* Per-task I/O state for an IBLOCK backstore */
struct iblock_req {
	struct se_task ib_task;
	unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
	atomic_t ib_bio_cnt;		/* bios still in flight for this task */
	atomic_t ib_bio_err_cnt;	/* bios that completed with an error */
	struct bio *ib_bio;		/* head of the chained bio list */
	struct iblock_dev *ib_dev;
} ____cacheline_aligned;

/* ibd_flags bits */
#define IBDF_HAS_UDEV_PATH 0x01
#define IBDF_HAS_FORCE 0x02

/* Per-device state: one claimed backing struct block_device */
struct iblock_dev {
	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
	int ibd_force;
	int ibd_major;
	int ibd_minor;
	u32 ibd_depth;
	u32 ibd_flags;			/* IBDF_* bits above */
	struct bio_set *ibd_bio_set;	/* pool for bio allocation */
	struct block_device *ibd_bd;	/* claimed backing block device */
	struct iblock_hba *ibd_host;
} ____cacheline_aligned;

struct iblock_hba {
	int iblock_host_id;
} ____cacheline_aligned;

#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
new file mode 100644
index 000000000000..d5a48aa0d2d1
--- /dev/null
+++ b/drivers/target/target_core_mib.c
@@ -0,0 +1,1078 @@
1/*******************************************************************************
2 * Filename: target_core_mib.c
3 *
4 * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
5 * Copyright (c) 2007-2010 Rising Tide Systems
6 * Copyright (c) 2008-2010 Linux-iSCSI.org
7 *
8 * Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 ******************************************************************************/
25
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/delay.h>
30#include <linux/timer.h>
31#include <linux/string.h>
32#include <linux/version.h>
33#include <generated/utsrelease.h>
34#include <linux/utsname.h>
35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
37#include <linux/blkdev.h>
38#include <scsi/scsi.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_host.h>
41
42#include <target/target_core_base.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_configfs.h>
46
47#include "target_core_hba.h"
48#include "target_core_mib.h"
49
50/* SCSI mib table index */
51static struct scsi_index_table scsi_index_table;
52
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif

/* SCSI Instance Table */
#define SCSI_INST_SW_INDEX 1
#define SCSI_TRANSPORT_INDEX 1

#define NONE "None"
/* Fully parenthesize the argument so expression arguments expand correctly */
#define ISPRINT(a) (((a) >= ' ') && ((a) <= '~'))
63
64static inline int list_is_first(const struct list_head *list,
65 const struct list_head *head)
66{
67 return list->prev == head;
68}
69
/*
 * seq_file iterator over the global subsystem-device list; the device
 * lock is held from ->start until ->stop.
 */
static void *locate_hba_start(
	struct seq_file *seq,
	loff_t *pos)
{
	spin_lock(&se_global->g_device_lock);
	return seq_list_start(&se_global->g_se_dev_list, *pos);
}

static void *locate_hba_next(
	struct seq_file *seq,
	void *v,
	loff_t *pos)
{
	return seq_list_next(v, &se_global->g_se_dev_list, pos);
}

/* Drop the lock taken by locate_hba_start(). */
static void locate_hba_stop(struct seq_file *seq, void *v)
{
	spin_unlock(&se_global->g_device_lock);
}
90
91/****************************************************************************
92 * SCSI MIB Tables
93 ****************************************************************************/
94
95/*
96 * SCSI Instance Table
97 */
98static void *scsi_inst_seq_start(
99 struct seq_file *seq,
100 loff_t *pos)
101{
102 spin_lock(&se_global->hba_lock);
103 return seq_list_start(&se_global->g_hba_list, *pos);
104}
105
106static void *scsi_inst_seq_next(
107 struct seq_file *seq,
108 void *v,
109 loff_t *pos)
110{
111 return seq_list_next(v, &se_global->g_hba_list, pos);
112}
113
114static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
115{
116 spin_unlock(&se_global->hba_lock);
117}
118
/* Emit one SCSI Instance Table row (header printed before the first row). */
static int scsi_inst_seq_show(struct seq_file *seq, void *v)
{
	struct se_hba *hba = list_entry(v, struct se_hba, hba_list);

	if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
		seq_puts(seq, "inst sw_indx\n");

	seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
	seq_printf(seq, "plugin: %s version: %s\n",
			hba->transport->name, TARGET_CORE_VERSION);

	return 0;
}
132
/* seq_file plumbing for the SCSI Instance Table procfs entry */
static const struct seq_operations scsi_inst_seq_ops = {
	.start	= scsi_inst_seq_start,
	.next	= scsi_inst_seq_next,
	.stop	= scsi_inst_seq_stop,
	.show	= scsi_inst_seq_show
};

static int scsi_inst_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_inst_seq_ops);
}

static const struct file_operations scsi_inst_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_inst_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
152
153/*
154 * SCSI Device Table
155 */
156static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
157{
158 return locate_hba_start(seq, pos);
159}
160
161static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
162{
163 return locate_hba_next(seq, v, pos);
164}
165
166static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
167{
168 locate_hba_stop(seq, v);
169}
170
/*
 * Emit one SCSI Device Table row, plus a "dev_alias" line built from the
 * T10 WWN vendor (8 chars) and model (16 chars), with non-printable
 * characters replaced by spaces.
 */
static int scsi_dev_seq_show(struct seq_file *seq, void *v)
{
	struct se_hba *hba;
	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
						g_se_dev_list);
	struct se_device *dev = se_dev->se_dev_ptr;
	char str[28];
	int k;

	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
		seq_puts(seq, "inst indx role ports\n");

	/* Subsystem dev may exist without an activated se_device yet. */
	if (!(dev))
		return 0;

	hba = dev->se_hba;
	if (!(hba)) {
		/* Log error ? */
		return 0;
	}

	seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
		   dev->dev_index, "Target", dev->dev_port_count);

	/*
	 * NOTE(review): this memcpy appears redundant — bytes 0-25 of str
	 * are overwritten below and bytes 26-27 fall after the NUL; verify
	 * before removing.
	 */
	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);

	/* vendor -> str[0..7], separator space at str[8] */
	for (k = 0; k < 8; k++)
		str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
				DEV_T10_WWN(dev)->vendor[k] : 0x20;
	str[k] = 0x20;

	/* model -> str[9..24], NUL terminator at str[25] */
	for (k = 0; k < 16; k++)
		str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
				DEV_T10_WWN(dev)->model[k] : 0x20;
	str[k + 9] = 0;

	seq_printf(seq, "dev_alias: %s\n", str);

	return 0;
}
213
/* seq_file plumbing for the SCSI Device Table procfs entry */
static const struct seq_operations scsi_dev_seq_ops = {
	.start	= scsi_dev_seq_start,
	.next	= scsi_dev_seq_next,
	.stop	= scsi_dev_seq_stop,
	.show	= scsi_dev_seq_show
};

static int scsi_dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_dev_seq_ops);
}

static const struct file_operations scsi_dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
233
234/*
235 * SCSI Port Table
236 */
237static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
238{
239 return locate_hba_start(seq, pos);
240}
241
242static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
243{
244 return locate_hba_next(seq, v, pos);
245}
246
247static void scsi_port_seq_stop(struct seq_file *seq, void *v)
248{
249 locate_hba_stop(seq, v);
250}
251
/* Emit one SCSI Port Table row per struct se_port attached to the device. */
static int scsi_port_seq_show(struct seq_file *seq, void *v)
{
	struct se_hba *hba;
	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
						g_se_dev_list);
	struct se_device *dev = se_dev->se_dev_ptr;
	struct se_port *sep, *sep_tmp;

	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
		seq_puts(seq, "inst device indx role busy_count\n");

	if (!(dev))
		return 0;

	hba = dev->se_hba;
	if (!(hba)) {
		/* Log error ? */
		return 0;
	}

	/* FIXME: scsiPortBusyStatuses count */
	spin_lock(&dev->se_port_lock);
	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
		seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
			dev->dev_index, sep->sep_index, "Device",
			dev->dev_index, 0);
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}
283
/* seq_file plumbing for the SCSI Port Table procfs entry */
static const struct seq_operations scsi_port_seq_ops = {
	.start	= scsi_port_seq_start,
	.next	= scsi_port_seq_next,
	.stop	= scsi_port_seq_stop,
	.show	= scsi_port_seq_show
};

static int scsi_port_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_port_seq_ops);
}

static const struct file_operations scsi_port_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_port_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
303
304/*
305 * SCSI Transport Table
306 */
307static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
308{
309 return locate_hba_start(seq, pos);
310}
311
312static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
313{
314 return locate_hba_next(seq, v, pos);
315}
316
317static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
318{
319 locate_hba_stop(seq, v);
320}
321
/*
 * Emit one SCSI Transport Table row per port, naming the transport after
 * the fabric ("scsiTransport<fabric>") and identifying the device by its
 * T10 unit serial (falling back to the vendor string).
 */
static int scsi_transport_seq_show(struct seq_file *seq, void *v)
{
	struct se_hba *hba;
	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
						g_se_dev_list);
	struct se_device *dev = se_dev->se_dev_ptr;
	struct se_port *se, *se_tmp;
	struct se_portal_group *tpg;
	struct t10_wwn *wwn;
	char buf[64];

	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
		seq_puts(seq, "inst device indx dev_name\n");

	if (!(dev))
		return 0;

	hba = dev->se_hba;
	if (!(hba)) {
		/* Log error ? */
		return 0;
	}

	wwn = DEV_T10_WWN(dev);

	spin_lock(&dev->se_port_lock);
	list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
		tpg = se->sep_tpg;
		/*
		 * NOTE(review): unbounded sprintf into buf[64] — assumes
		 * fabric names stay short; confirm against registered fabrics.
		 */
		sprintf(buf, "scsiTransport%s",
				TPG_TFO(tpg)->get_fabric_name());

		seq_printf(seq, "%u %s %u %s+%s\n",
			hba->hba_index, /* scsiTransportIndex */
			buf,  /* scsiTransportType */
			(TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
			TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
			0,
			TPG_TFO(tpg)->tpg_get_wwn(tpg),
			(strlen(wwn->unit_serial)) ?
			/* scsiTransportDevName */
			wwn->unit_serial : wwn->vendor);
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}
368
/* seq_file plumbing for the SCSI Transport Table procfs entry */
static const struct seq_operations scsi_transport_seq_ops = {
	.start	= scsi_transport_seq_start,
	.next	= scsi_transport_seq_next,
	.stop	= scsi_transport_seq_stop,
	.show	= scsi_transport_seq_show
};

static int scsi_transport_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_transport_seq_ops);
}

static const struct file_operations scsi_transport_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_transport_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
388
389/*
390 * SCSI Target Device Table
391 */
392static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
393{
394 return locate_hba_start(seq, pos);
395}
396
397static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
398{
399 return locate_hba_next(seq, v, pos);
400}
401
402static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
403{
404 locate_hba_stop(seq, v);
405}
406
407
408#define LU_COUNT 1 /* for now */
409static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
410{
411 struct se_hba *hba;
412 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
413 g_se_dev_list);
414 struct se_device *dev = se_dev->se_dev_ptr;
415 int non_accessible_lus = 0;
416 char status[16];
417
418 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
419 seq_puts(seq, "inst indx num_LUs status non_access_LUs"
420 " resets\n");
421
422 if (!(dev))
423 return 0;
424
425 hba = dev->se_hba;
426 if (!(hba)) {
427 /* Log error ? */
428 return 0;
429 }
430
431 switch (dev->dev_status) {
432 case TRANSPORT_DEVICE_ACTIVATED:
433 strcpy(status, "activated");
434 break;
435 case TRANSPORT_DEVICE_DEACTIVATED:
436 strcpy(status, "deactivated");
437 non_accessible_lus = 1;
438 break;
439 case TRANSPORT_DEVICE_SHUTDOWN:
440 strcpy(status, "shutdown");
441 non_accessible_lus = 1;
442 break;
443 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
444 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
445 strcpy(status, "offline");
446 non_accessible_lus = 1;
447 break;
448 default:
449 sprintf(status, "unknown(%d)", dev->dev_status);
450 non_accessible_lus = 1;
451 }
452
453 seq_printf(seq, "%u %u %u %s %u %u\n",
454 hba->hba_index, dev->dev_index, LU_COUNT,
455 status, non_accessible_lus, dev->num_resets);
456
457 return 0;
458}
459
/* seq_file plumbing for the SCSI Target Device Table procfs entry */
static const struct seq_operations scsi_tgt_dev_seq_ops = {
	.start	= scsi_tgt_dev_seq_start,
	.next	= scsi_tgt_dev_seq_next,
	.stop	= scsi_tgt_dev_seq_stop,
	.show	= scsi_tgt_dev_seq_show
};

static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_tgt_dev_seq_ops);
}

static const struct file_operations scsi_tgt_dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_tgt_dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
479
480/*
481 * SCSI Target Port Table
482 */
483static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
484{
485 return locate_hba_start(seq, pos);
486}
487
488static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
489{
490 return locate_hba_next(seq, v, pos);
491}
492
493static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
494{
495 locate_hba_stop(seq, v);
496}
497
/*
 * Emit one SCSI Target Port Table row per port: identity plus command
 * and MiB traffic counters sampled under the LUN's sep_lock.
 */
static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
{
	struct se_hba *hba;
	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
						g_se_dev_list);
	struct se_device *dev = se_dev->se_dev_ptr;
	struct se_port *sep, *sep_tmp;
	struct se_portal_group *tpg;
	u32 rx_mbytes, tx_mbytes;
	unsigned long long num_cmds;
	char buf[64];

	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
		seq_puts(seq, "inst device indx name port_index in_cmds"
			" write_mbytes read_mbytes hs_in_cmds\n");

	if (!(dev))
		return 0;

	hba = dev->se_hba;
	if (!(hba)) {
		/* Log error ? */
		return 0;
	}

	spin_lock(&dev->se_port_lock);
	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
		tpg = sep->sep_tpg;
		/*
		 * NOTE(review): unbounded sprintf into buf[64] — assumes
		 * fabric names stay short; confirm against registered fabrics.
		 */
		sprintf(buf, "%sPort#",
			TPG_TFO(tpg)->get_fabric_name());

		seq_printf(seq, "%u %u %u %s%d %s%s%d ",
		     hba->hba_index,
		     dev->dev_index,
		     sep->sep_index,
		     buf, sep->sep_index,
		     TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
		     TPG_TFO(tpg)->tpg_get_tag(tpg));

		/* Sample the counters atomically with respect to I/O. */
		spin_lock(&sep->sep_lun->lun_sep_lock);
		num_cmds = sep->sep_stats.cmd_pdus;
		rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
		tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
		spin_unlock(&sep->sep_lun->lun_sep_lock);

		seq_printf(seq, "%llu %u %u %u\n", num_cmds,
			rx_mbytes, tx_mbytes, 0);
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}
550
/* seq_file plumbing for the SCSI Target Port Table procfs entry */
static const struct seq_operations scsi_tgt_port_seq_ops = {
	.start	= scsi_tgt_port_seq_start,
	.next	= scsi_tgt_port_seq_next,
	.stop	= scsi_tgt_port_seq_stop,
	.show	= scsi_tgt_port_seq_show
};

static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_tgt_port_seq_ops);
}

static const struct file_operations scsi_tgt_port_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_tgt_port_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
570
571/*
572 * SCSI Authorized Initiator Table:
573 * It contains the SCSI Initiators authorized to be attached to one of the
574 * local Target ports.
575 * Iterates through all active TPGs and extracts the info from the ACLs
576 */
577static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
578{
579 spin_lock_bh(&se_global->se_tpg_lock);
580 return seq_list_start(&se_global->g_se_tpg_list, *pos);
581}
582
583static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
584 loff_t *pos)
585{
586 return seq_list_next(v, &se_global->g_se_tpg_list, pos);
587}
588
589static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
590{
591 spin_unlock_bh(&se_global->se_tpg_lock);
592}
593
/*
 * Emit Authorized Initiator rows for every node ACL in this TPG and every
 * LUN mapping in its device list.
 *
 * Locking: acl_node_lock is dropped while a nacl's device list is walked;
 * the nacl is pinned across the gap via mib_ref_count so it cannot be
 * freed while unlocked.
 */
static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
{
	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
						se_tpg_list);
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_node_acl *se_nacl;
	int j;

	if (list_is_first(&se_tpg->se_tpg_list,
			  &se_global->g_se_tpg_list))
		seq_puts(seq, "inst dev port indx dev_or_port intr_name "
			 "map_indx att_count num_cmds read_mbytes "
			 "write_mbytes hs_num_cmds creation_time row_status\n");

	if (!(se_tpg))
		return 0;

	spin_lock(&se_tpg->acl_node_lock);
	list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {

		/* Pin the nacl before dropping acl_node_lock. */
		atomic_inc(&se_nacl->mib_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock(&se_tpg->acl_node_lock);

		spin_lock_irq(&se_nacl->device_list_lock);
		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
			deve = &se_nacl->device_list[j];
			/* Skip slots without initiator access or a LUN. */
			if (!(deve->lun_flags &
					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
			    (!deve->se_lun))
				continue;
			lun = deve->se_lun;
			if (!lun->lun_se_dev)
				continue;

			seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
					" %u %s\n",
				/* scsiInstIndex */
				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
				0,
				/* scsiDeviceIndex */
				lun->lun_se_dev->dev_index,
				/* scsiAuthIntrTgtPortIndex */
				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
				/* scsiAuthIntrIndex */
				se_nacl->acl_index,
				/* scsiAuthIntrDevOrPort */
				1,
				/* scsiAuthIntrName */
				se_nacl->initiatorname[0] ?
					se_nacl->initiatorname : NONE,
				/* FIXME: scsiAuthIntrLunMapIndex */
				0,
				/* scsiAuthIntrAttachedTimes */
				deve->attach_count,
				/* scsiAuthIntrOutCommands */
				deve->total_cmds,
				/* scsiAuthIntrReadMegaBytes */
				(u32)(deve->read_bytes >> 20),
				/* scsiAuthIntrWrittenMegaBytes */
				(u32)(deve->write_bytes >> 20),
				/* FIXME: scsiAuthIntrHSOutCommands */
				0,
				/* scsiAuthIntrLastCreation */
				(u32)(((u32)deve->creation_time -
					    INITIAL_JIFFIES) * 100 / HZ),
				/* FIXME: scsiAuthIntrRowStatus */
				"Ready");
		}
		spin_unlock_irq(&se_nacl->device_list_lock);

		/* Re-take the list lock and drop the pin. */
		spin_lock(&se_tpg->acl_node_lock);
		atomic_dec(&se_nacl->mib_ref_count);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&se_tpg->acl_node_lock);

	return 0;
}
675
676static const struct seq_operations scsi_auth_intr_seq_ops = {
677 .start = scsi_auth_intr_seq_start,
678 .next = scsi_auth_intr_seq_next,
679 .stop = scsi_auth_intr_seq_stop,
680 .show = scsi_auth_intr_seq_show
681};
682
/* ->open() for /proc/scsi_target/mib/scsi_auth_intr: bind the seq_ops. */
static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_auth_intr_seq_ops);
}
687
/* file_operations for the scsi_auth_intr proc entry (standard seq_file). */
static const struct file_operations scsi_auth_intr_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_auth_intr_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
695
/*
 * SCSI Attached Initiator Port Table:
 * It lists the SCSI Initiators attached to one of the local Target ports.
 * Iterates through all active TPGs and uses active sessions from each TPG
 * to list the info for this table.
 */
/*
 * seq ->start(): take the global TPG list lock for the whole iteration
 * (released in scsi_att_intr_port_seq_stop()).
 */
static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&se_global->se_tpg_lock);
	return seq_list_start(&se_global->g_se_tpg_list, *pos);
}
707
/* seq ->next(): advance to the next TPG under se_tpg_lock. */
static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
}
713
/* seq ->stop(): drop the lock taken in scsi_att_intr_port_seq_start(). */
static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&se_global->se_tpg_lock);
}
718
/*
 * seq ->show(): emit one row per (session, mapped LUN) pair for the
 * Attached Initiator Port table.  Locking is delicate: session_lock is
 * dropped while the per-nacl device list is walked, with mib_ref_count
 * pinning se_sess/se_nacl across the unlock window.
 */
static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
{
	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
						se_tpg_list);
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_node_acl *se_nacl;
	struct se_session *se_sess;
	unsigned char buf[64];
	int j;

	/* Print the column header only once, for the first list element. */
	if (list_is_first(&se_tpg->se_tpg_list,
			  &se_global->g_se_tpg_list))
		seq_puts(seq, "inst dev port indx port_auth_indx port_name"
			" port_ident\n");

	if (!(se_tpg))
		return 0;

	spin_lock(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		/*
		 * NOTE(review): sessions for which sess_logged_in() returns
		 * non-zero are skipped here — this reads as inverted for
		 * "logged in"; confirm the fabric callback's return
		 * convention.  Sessions without an ACL/device list are
		 * skipped as well.
		 */
		if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
		    (!se_sess->se_node_acl) ||
		    (!se_sess->se_node_acl->device_list))
			continue;

		/*
		 * Pin the session and node ACL before dropping session_lock;
		 * the list walk below resumes from se_sess after relocking,
		 * which assumes list membership survives the unlock window.
		 */
		atomic_inc(&se_sess->mib_ref_count);
		smp_mb__after_atomic_inc();
		se_nacl = se_sess->se_node_acl;
		atomic_inc(&se_nacl->mib_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock(&se_tpg->session_lock);

		spin_lock_irq(&se_nacl->device_list_lock);
		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
			deve = &se_nacl->device_list[j];
			/* Only LUNs this initiator actually has access to. */
			if (!(deve->lun_flags &
			      TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
			    (!deve->se_lun))
				continue;

			lun = deve->se_lun;
			if (!lun->lun_se_dev)
				continue;

			/* Optional fabric-provided initiator SID (ISID). */
			memset(buf, 0, 64);
			if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
				TPG_TFO(se_tpg)->sess_get_initiator_sid(
					se_sess, (unsigned char *)&buf[0], 64);

			seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
				/* scsiInstIndex */
				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
				0,
				/* scsiDeviceIndex */
				lun->lun_se_dev->dev_index,
				/* scsiPortIndex */
				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
				/* scsiAttIntrPortIndex */
				(TPG_TFO(se_tpg)->sess_get_index != NULL) ?
				TPG_TFO(se_tpg)->sess_get_index(se_sess) :
				0,
				/* scsiAttIntrPortAuthIntrIdx */
				se_nacl->acl_index,
				/* scsiAttIntrPortName */
				se_nacl->initiatorname[0] ?
				se_nacl->initiatorname : NONE,
				/* scsiAttIntrPortIdentifier */
				buf);
		}
		spin_unlock_irq(&se_nacl->device_list_lock);

		/* Retake session_lock and drop the pins taken above. */
		spin_lock(&se_tpg->session_lock);
		atomic_dec(&se_nacl->mib_ref_count);
		smp_mb__after_atomic_dec();
		atomic_dec(&se_sess->mib_ref_count);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&se_tpg->session_lock);

	return 0;
}
802
/* seq_file iterator for the SCSI Attached Initiator Port table. */
static const struct seq_operations scsi_att_intr_port_seq_ops = {
	.start	= scsi_att_intr_port_seq_start,
	.next	= scsi_att_intr_port_seq_next,
	.stop	= scsi_att_intr_port_seq_stop,
	.show	= scsi_att_intr_port_seq_show
};
809
/* ->open() for /proc/scsi_target/mib/scsi_att_intr_port. */
static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_att_intr_port_seq_ops);
}
814
/* file_operations for the scsi_att_intr_port proc entry (standard seq_file). */
static const struct file_operations scsi_att_intr_port_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_att_intr_port_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
822
823/*
824 * SCSI Logical Unit Table
825 */
/* seq ->start(): delegate to the shared HBA/device list iterator. */
static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
{
	return locate_hba_start(seq, pos);
}
830
/* seq ->next(): delegate to the shared HBA/device list iterator. */
static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return locate_hba_next(seq, v, pos);
}
835
/* seq ->stop(): release whatever locate_hba_start() acquired. */
static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
{
	locate_hba_stop(seq, v);
}
840
841#define SCSI_LU_INDEX 1
842static int scsi_lu_seq_show(struct seq_file *seq, void *v)
843{
844 struct se_hba *hba;
845 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
846 g_se_dev_list);
847 struct se_device *dev = se_dev->se_dev_ptr;
848 int j;
849 char str[28];
850
851 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
852 seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
853 " dev_type status state-bit num_cmds read_mbytes"
854 " write_mbytes resets full_stat hs_num_cmds creation_time\n");
855
856 if (!(dev))
857 return 0;
858
859 hba = dev->se_hba;
860 if (!(hba)) {
861 /* Log error ? */
862 return 0;
863 }
864
865 /* Fix LU state, if we can read it from the device */
866 seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
867 dev->dev_index, SCSI_LU_INDEX,
868 (unsigned long long)0, /* FIXME: scsiLuDefaultLun */
869 (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
870 /* scsiLuWwnName */
871 (char *)&DEV_T10_WWN(dev)->unit_serial[0] :
872 "None");
873
874 memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
875 /* scsiLuVendorId */
876 for (j = 0; j < 8; j++)
877 str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
878 DEV_T10_WWN(dev)->vendor[j] : 0x20;
879 str[8] = 0;
880 seq_printf(seq, " %s", str);
881
882 /* scsiLuProductId */
883 for (j = 0; j < 16; j++)
884 str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
885 DEV_T10_WWN(dev)->model[j] : 0x20;
886 str[16] = 0;
887 seq_printf(seq, " %s", str);
888
889 /* scsiLuRevisionId */
890 for (j = 0; j < 4; j++)
891 str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
892 DEV_T10_WWN(dev)->revision[j] : 0x20;
893 str[4] = 0;
894 seq_printf(seq, " %s", str);
895
896 seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
897 /* scsiLuPeripheralType */
898 TRANSPORT(dev)->get_device_type(dev),
899 (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
900 "available" : "notavailable", /* scsiLuStatus */
901 "exposed", /* scsiLuState */
902 (unsigned long long)dev->num_cmds,
903 /* scsiLuReadMegaBytes */
904 (u32)(dev->read_bytes >> 20),
905 /* scsiLuWrittenMegaBytes */
906 (u32)(dev->write_bytes >> 20),
907 dev->num_resets, /* scsiLuInResets */
908 0, /* scsiLuOutTaskSetFullStatus */
909 0, /* scsiLuHSInCommands */
910 (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
911 100 / HZ));
912
913 return 0;
914}
915
/* seq_file iterator for the SCSI Logical Unit table. */
static const struct seq_operations scsi_lu_seq_ops = {
	.start	= scsi_lu_seq_start,
	.next	= scsi_lu_seq_next,
	.stop	= scsi_lu_seq_stop,
	.show	= scsi_lu_seq_show
};
922
/* ->open() for /proc/scsi_target/mib/scsi_lu. */
static int scsi_lu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_lu_seq_ops);
}
927
/* file_operations for the scsi_lu proc entry (standard seq_file). */
static const struct file_operations scsi_lu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = scsi_lu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
935
936/****************************************************************************/
937
938/*
939 * Remove proc fs entries
940 */
941void remove_scsi_target_mib(void)
942{
943 remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
944 remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
945 remove_proc_entry("scsi_target/mib/scsi_port", NULL);
946 remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
947 remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
948 remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
949 remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
950 remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
951 remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
952 remove_proc_entry("scsi_target/mib", NULL);
953}
954
955/*
956 * Create proc fs entries for the mib tables
957 */
958int init_scsi_target_mib(void)
959{
960 struct proc_dir_entry *dir_entry;
961 struct proc_dir_entry *scsi_inst_entry;
962 struct proc_dir_entry *scsi_dev_entry;
963 struct proc_dir_entry *scsi_port_entry;
964 struct proc_dir_entry *scsi_transport_entry;
965 struct proc_dir_entry *scsi_tgt_dev_entry;
966 struct proc_dir_entry *scsi_tgt_port_entry;
967 struct proc_dir_entry *scsi_auth_intr_entry;
968 struct proc_dir_entry *scsi_att_intr_port_entry;
969 struct proc_dir_entry *scsi_lu_entry;
970
971 dir_entry = proc_mkdir("scsi_target/mib", NULL);
972 if (!(dir_entry)) {
973 printk(KERN_ERR "proc_mkdir() failed.\n");
974 return -1;
975 }
976
977 scsi_inst_entry =
978 create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
979 if (scsi_inst_entry)
980 scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
981 else
982 goto error;
983
984 scsi_dev_entry =
985 create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
986 if (scsi_dev_entry)
987 scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
988 else
989 goto error;
990
991 scsi_port_entry =
992 create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
993 if (scsi_port_entry)
994 scsi_port_entry->proc_fops = &scsi_port_seq_fops;
995 else
996 goto error;
997
998 scsi_transport_entry =
999 create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
1000 if (scsi_transport_entry)
1001 scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
1002 else
1003 goto error;
1004
1005 scsi_tgt_dev_entry =
1006 create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
1007 if (scsi_tgt_dev_entry)
1008 scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
1009 else
1010 goto error;
1011
1012 scsi_tgt_port_entry =
1013 create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
1014 if (scsi_tgt_port_entry)
1015 scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
1016 else
1017 goto error;
1018
1019 scsi_auth_intr_entry =
1020 create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
1021 if (scsi_auth_intr_entry)
1022 scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
1023 else
1024 goto error;
1025
1026 scsi_att_intr_port_entry =
1027 create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
1028 if (scsi_att_intr_port_entry)
1029 scsi_att_intr_port_entry->proc_fops =
1030 &scsi_att_intr_port_seq_fops;
1031 else
1032 goto error;
1033
1034 scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
1035 if (scsi_lu_entry)
1036 scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
1037 else
1038 goto error;
1039
1040 return 0;
1041
1042error:
1043 printk(KERN_ERR "create_proc_entry() failed.\n");
1044 remove_scsi_target_mib();
1045 return -1;
1046}
1047
1048/*
1049 * Initialize the index table for allocating unique row indexes to various mib
1050 * tables
1051 */
/*
 * Reset all per-type mib row counters to zero and (re)initialize the
 * spinlock guarding them.  Called once at module init.
 */
void init_scsi_index_table(void)
{
	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
	spin_lock_init(&scsi_index_table.lock);
}
1057
1058/*
1059 * Allocate a new row index for the entry type specified
1060 */
/*
 * Allocate the next non-zero row index for the given mib table type.
 * Indexes start at 1 and wrap, skipping 0 on wraparound.
 *
 * NOTE(review): `type < 0` may be always-false if the enum's underlying
 * type is unsigned (compiler-dependent), and `-1` returned from a u32
 * function reaches callers as 0xFFFFFFFF — both left as-is since callers
 * may depend on the current behavior; worth confirming.
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
		printk(KERN_ERR "Invalid index type %d\n", type);
		return -1;
	}

	spin_lock(&scsi_index_table.lock);
	new_index = ++scsi_index_table.scsi_mib_index[type];
	/* Skip 0 on counter wraparound so 0 never appears as a valid index. */
	if (new_index == 0)
		new_index = ++scsi_index_table.scsi_mib_index[type];
	spin_unlock(&scsi_index_table.lock);

	return new_index;
}
EXPORT_SYMBOL(scsi_get_new_index);
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
new file mode 100644
index 000000000000..277204633850
--- /dev/null
+++ b/drivers/target/target_core_mib.h
@@ -0,0 +1,28 @@
1#ifndef TARGET_CORE_MIB_H
2#define TARGET_CORE_MIB_H
3
4typedef enum {
5 SCSI_INST_INDEX,
6 SCSI_DEVICE_INDEX,
7 SCSI_AUTH_INTR_INDEX,
8 SCSI_INDEX_TYPE_MAX
9} scsi_index_t;
10
11struct scsi_index_table {
12 spinlock_t lock;
13 u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
14} ____cacheline_aligned;
15
16/* SCSI Port stats */
17struct scsi_port_stats {
18 u64 cmd_pdus;
19 u64 tx_data_octets;
20 u64 rx_data_octets;
21} ____cacheline_aligned;
22
23extern int init_scsi_target_mib(void);
24extern void remove_scsi_target_mib(void);
25extern void init_scsi_index_table(void);
26extern u32 scsi_get_new_index(scsi_index_t);
27
28#endif /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
new file mode 100644
index 000000000000..2521f75362c3
--- /dev/null
+++ b/drivers/target/target_core_pr.c
@@ -0,0 +1,4252 @@
1/*******************************************************************************
2 * Filename: target_core_pr.c
3 *
4 * This file contains SPC-3 compliant persistent reservations and
5 * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
6 *
7 * Copyright (c) 2009, 2010 Rising Tide Systems
8 * Copyright (c) 2009, 2010 Linux-iSCSI.org
9 *
10 * Nicholas A. Bellinger <nab@kernel.org>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 *
26 ******************************************************************************/
27
28#include <linux/version.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/list.h>
32#include <scsi/scsi.h>
33#include <scsi/scsi_cmnd.h>
34#include <asm/unaligned.h>
35
36#include <target/target_core_base.h>
37#include <target/target_core_device.h>
38#include <target/target_core_tmr.h>
39#include <target/target_core_tpg.h>
40#include <target/target_core_transport.h>
41#include <target/target_core_fabric_ops.h>
42#include <target/target_core_configfs.h>
43
44#include "target_core_hba.h"
45#include "target_core_pr.h"
46#include "target_core_ua.h"
47
/*
 * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
 *
 * Holds one destination I_T nexus parsed from a PROUT TransportID list
 * while a REGISTER w/ SPEC_I_PT=1 is being processed.
 */
struct pr_transport_id_holder {
	int dest_local_nexus;			/* non-zero if dest is the local nexus */
	struct t10_pr_registration *dest_pr_reg;
	struct se_portal_group *dest_tpg;
	struct se_node_acl *dest_node_acl;
	struct se_dev_entry *dest_se_deve;
	struct list_head dest_list;		/* linkage on the caller's tid list */
};
59
60int core_pr_dump_initiator_port(
61 struct t10_pr_registration *pr_reg,
62 char *buf,
63 u32 size)
64{
65 if (!(pr_reg->isid_present_at_reg))
66 return 0;
67
68 snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
69 return 1;
70}
71
72static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
73 struct t10_pr_registration *, int);
74
75static int core_scsi2_reservation_seq_non_holder(
76 struct se_cmd *cmd,
77 unsigned char *cdb,
78 u32 pr_reg_type)
79{
80 switch (cdb[0]) {
81 case INQUIRY:
82 case RELEASE:
83 case RELEASE_10:
84 return 0;
85 default:
86 return 1;
87 }
88
89 return 1;
90}
91
92static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
93{
94 struct se_device *dev = cmd->se_dev;
95 struct se_session *sess = cmd->se_sess;
96 int ret;
97
98 if (!(sess))
99 return 0;
100
101 spin_lock(&dev->dev_reservation_lock);
102 if (!dev->dev_reserved_node_acl || !sess) {
103 spin_unlock(&dev->dev_reservation_lock);
104 return 0;
105 }
106 if (dev->dev_reserved_node_acl != sess->se_node_acl) {
107 spin_unlock(&dev->dev_reservation_lock);
108 return -1;
109 }
110 if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
111 spin_unlock(&dev->dev_reservation_lock);
112 return 0;
113 }
114 ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
115 spin_unlock(&dev->dev_reservation_lock);
116
117 return ret;
118}
119
120static int core_scsi2_reservation_release(struct se_cmd *cmd)
121{
122 struct se_device *dev = cmd->se_dev;
123 struct se_session *sess = cmd->se_sess;
124 struct se_portal_group *tpg = sess->se_tpg;
125
126 if (!(sess) || !(tpg))
127 return 0;
128
129 spin_lock(&dev->dev_reservation_lock);
130 if (!dev->dev_reserved_node_acl || !sess) {
131 spin_unlock(&dev->dev_reservation_lock);
132 return 0;
133 }
134
135 if (dev->dev_reserved_node_acl != sess->se_node_acl) {
136 spin_unlock(&dev->dev_reservation_lock);
137 return 0;
138 }
139 dev->dev_reserved_node_acl = NULL;
140 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
141 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
142 dev->dev_res_bin_isid = 0;
143 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
144 }
145 printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
146 " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
147 SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
148 sess->se_node_acl->initiatorname);
149 spin_unlock(&dev->dev_reservation_lock);
150
151 return 0;
152}
153
154static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
155{
156 struct se_device *dev = cmd->se_dev;
157 struct se_session *sess = cmd->se_sess;
158 struct se_portal_group *tpg = sess->se_tpg;
159
160 if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
161 (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
162 printk(KERN_ERR "LongIO and Obselete Bits set, returning"
163 " ILLEGAL_REQUEST\n");
164 return PYX_TRANSPORT_ILLEGAL_REQUEST;
165 }
166 /*
167 * This is currently the case for target_core_mod passthrough struct se_cmd
168 * ops
169 */
170 if (!(sess) || !(tpg))
171 return 0;
172
173 spin_lock(&dev->dev_reservation_lock);
174 if (dev->dev_reserved_node_acl &&
175 (dev->dev_reserved_node_acl != sess->se_node_acl)) {
176 printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
177 TPG_TFO(tpg)->get_fabric_name());
178 printk(KERN_ERR "Original reserver LUN: %u %s\n",
179 SE_LUN(cmd)->unpacked_lun,
180 dev->dev_reserved_node_acl->initiatorname);
181 printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
182 " from %s \n", SE_LUN(cmd)->unpacked_lun,
183 cmd->se_deve->mapped_lun,
184 sess->se_node_acl->initiatorname);
185 spin_unlock(&dev->dev_reservation_lock);
186 return PYX_TRANSPORT_RESERVATION_CONFLICT;
187 }
188
189 dev->dev_reserved_node_acl = sess->se_node_acl;
190 dev->dev_flags |= DF_SPC2_RESERVATIONS;
191 if (sess->sess_bin_isid != 0) {
192 dev->dev_res_bin_isid = sess->sess_bin_isid;
193 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
194 }
195 printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
196 " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
197 SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
198 sess->se_node_acl->initiatorname);
199 spin_unlock(&dev->dev_reservation_lock);
200
201 return 0;
202}
203
204static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
205 struct se_node_acl *, struct se_session *);
206static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
207
208/*
209 * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
210 * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
211 * thread context.
212 */
/*
 * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
 * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
 * thread context.
 *
 * Implements SPC-2 RESERVE/RELEASE with Compatible Reservation Handling
 * (CRH=1) when SPC-3 persistent reservations are active: determines
 * whether the legacy command must conflict with existing PR state before
 * falling through to plain SPC-2 emulation.
 */
int core_scsi2_emulate_crh(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
	struct t10_pr_registration *pr_reg;
	struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
	unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
	/* CRH applies only when the device runs SPC-3 PR emulation. */
	int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
	int conflict = 0;

	if (!(se_sess))
		return 0;

	if (!(crh))
		goto after_crh;

	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
			se_sess);
	if (pr_reg) {
		/*
		 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
		 * behavior
		 *
		 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
		 * status, but no reservation shall be established and the
		 * persistent reservation shall not be changed, if the command
		 * is received from a) and b) below.
		 *
		 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
		 * status, but the persistent reservation shall not be released,
		 * if the command is received from a) and b)
		 *
		 * a) An I_T nexus that is a persistent reservation holder; or
		 * b) An I_T nexus that is registered if a registrants only or
		 *    all registrants type persistent reservation is present.
		 *
		 * In all other cases, a RESERVE(6) command, RESERVE(10) command,
		 * RELEASE(6) command, or RELEASE(10) command shall be processed
		 * as defined in SPC-2.
		 */
		if (pr_reg->pr_res_holder) {
			core_scsi3_put_pr_reg(pr_reg);
			return 0;
		}
		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
		    (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
			core_scsi3_put_pr_reg(pr_reg);
			return 0;
		}
		core_scsi3_put_pr_reg(pr_reg);
		conflict = 1;
	} else {
		/*
		 * Following spc2r20 5.5.1 Reservations overview:
		 *
		 * If a logical unit has executed a PERSISTENT RESERVE OUT
		 * command with the REGISTER or the REGISTER AND IGNORE
		 * EXISTING KEY service action and is still registered by any
		 * initiator, all RESERVE commands and all RELEASE commands
		 * regardless of initiator shall conflict and shall terminate
		 * with a RESERVATION CONFLICT status.
		 */
		spin_lock(&pr_tmpl->registration_lock);
		conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
		spin_unlock(&pr_tmpl->registration_lock);
	}

	if (conflict) {
		printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
			" while active SPC-3 registrations exist,"
			" returning RESERVATION_CONFLICT\n");
		return PYX_TRANSPORT_RESERVATION_CONFLICT;
	}

after_crh:
	/* Plain SPC-2 emulation path. */
	if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
		return core_scsi2_reservation_reserve(cmd);
	else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
		return core_scsi2_reservation_release(cmd);
	else
		return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
297
298/*
299 * Begin SPC-3/SPC-4 Persistent Reservations emulation support
300 *
301 * This function is called by those initiator ports who are *NOT*
302 * the active PR reservation holder when a reservation is present.
303 */
/*
 * Decide whether a command from an I_T nexus that does NOT hold the
 * active persistent reservation may proceed.
 * Returns 0 = allow, 1 = RESERVATION CONFLICT, -1 = invalid input.
 */
static int core_scsi3_pr_seq_non_holder(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u32 pr_reg_type)
{
	struct se_dev_entry *se_deve;
	struct se_session *se_sess = SE_SESS(cmd);
	int other_cdb = 0, ignore_reg;
	int registered_nexus = 0, ret = 1; /* Conflict by default */
	int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
	int we = 0; /* Write Exclusive */
	int legacy = 0; /* Act like a legacy device and return
			 * RESERVATION CONFLICT on some CDBs */
	/*
	 * A legacy SPC-2 reservation is being held.
	 */
	if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
		return core_scsi2_reservation_seq_non_holder(cmd,
					cdb, pr_reg_type);

	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
	/*
	 * Determine if the registration should be ignored due to
	 * non-matching ISIDs in core_scsi3_pr_reservation_check().
	 */
	ignore_reg = (pr_reg_type & 0x80000000);
	if (ignore_reg)
		pr_reg_type &= ~0x80000000;

	/* The WE cases fall through to their EA siblings on purpose. */
	switch (pr_reg_type) {
	case PR_TYPE_WRITE_EXCLUSIVE:
		we = 1;
		/* fall through */
	case PR_TYPE_EXCLUSIVE_ACCESS:
		/*
		 * Some commands are only allowed for the persistent reservation
		 * holder.
		 */
		if ((se_deve->def_pr_registered) && !(ignore_reg))
			registered_nexus = 1;
		break;
	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
		we = 1;
		/* fall through */
	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
		/*
		 * Some commands are only allowed for registered I_T Nexuses.
		 */
		reg_only = 1;
		if ((se_deve->def_pr_registered) && !(ignore_reg))
			registered_nexus = 1;
		break;
	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
		we = 1;
		/* fall through */
	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
		/*
		 * Each registered I_T Nexus is a reservation holder.
		 */
		all_reg = 1;
		if ((se_deve->def_pr_registered) && !(ignore_reg))
			registered_nexus = 1;
		break;
	default:
		return -1;
	}
	/*
	 * Referenced from spc4r17 table 45 for *NON* PR holder access
	 */
	switch (cdb[0]) {
	case SECURITY_PROTOCOL_IN:
		if (registered_nexus)
			return 0;
		ret = (we) ? 0 : 1;
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
	case READ_ATTRIBUTE:
	case READ_BUFFER:
	case RECEIVE_DIAGNOSTIC:
		if (legacy) {
			ret = 1;
			break;
		}
		if (registered_nexus) {
			ret = 0;
			break;
		}
		ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
		break;
	case PERSISTENT_RESERVE_OUT:
		/*
		 * This follows PERSISTENT_RESERVE_OUT service actions that
		 * are allowed in the presence of various reservations.
		 * See spc4r17, table 46
		 */
		switch (cdb[1] & 0x1f) {
		case PRO_CLEAR:
		case PRO_PREEMPT:
		case PRO_PREEMPT_AND_ABORT:
			ret = (registered_nexus) ? 0 : 1;
			break;
		case PRO_REGISTER:
		case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
			ret = 0;
			break;
		case PRO_REGISTER_AND_MOVE:
		case PRO_RESERVE:
			ret = 1;
			break;
		case PRO_RELEASE:
			ret = (registered_nexus) ? 0 : 1;
			break;
		default:
			printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
				" action: 0x%02x\n", cdb[1] & 0x1f);
			return -1;
		}
		break;
	case RELEASE:
	case RELEASE_10:
		/* Handled by CRH=1 in core_scsi2_emulate_crh() */
		ret = 0;
		break;
	case RESERVE:
	case RESERVE_10:
		/* Handled by CRH=1 in core_scsi2_emulate_crh() */
		ret = 0;
		break;
	case TEST_UNIT_READY:
		ret = (legacy) ? 1 : 0; /* Conflict for legacy */
		break;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_MANAGEMENT_PROTOCOL_IN:
			if (registered_nexus) {
				ret = 0;
				break;
			}
			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
			break;
		case MI_REPORT_SUPPORTED_OPERATION_CODES:
		case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
			if (legacy) {
				ret = 1;
				break;
			}
			if (registered_nexus) {
				ret = 0;
				break;
			}
			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
			break;
		case MI_REPORT_ALIASES:
		case MI_REPORT_IDENTIFYING_INFORMATION:
		case MI_REPORT_PRIORITY:
		case MI_REPORT_TARGET_PGS:
		case MI_REPORT_TIMESTAMP:
			ret = 0; /* Allowed */
			break;
		default:
			printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
				(cdb[1] & 0x1f));
			return -1;
		}
		break;
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case INQUIRY:
	case LOG_SENSE:
	case READ_MEDIA_SERIAL_NUMBER:
	case REPORT_LUNS:
	case REQUEST_SENSE:
		ret = 0; /*/ Allowed CDBs */
		break;
	default:
		other_cdb = 1;
		break;
	}
	/*
	 * Case where the CDB is explicitly allowed in the above switch
	 * statement.
	 */
	if (!(ret) && !(other_cdb)) {
#if 0
		printk(KERN_INFO "Allowing explicit CDB: 0x%02x for %s"
			" reservation holder\n", cdb[0],
			core_scsi3_pr_dump_type(pr_reg_type));
#endif
		return ret;
	}
	/*
	 * Check if write exclusive initiator ports *NOT* holding the
	 * WRITE_EXCLUSIVE_* reservation.
	 */
	if ((we) && !(registered_nexus)) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			/*
			 * Conflict for write exclusive
			 */
			printk(KERN_INFO "%s Conflict for unregistered nexus"
				" %s CDB: 0x%02x to %s reservation\n",
				transport_dump_cmd_direction(cmd),
				se_sess->se_node_acl->initiatorname, cdb[0],
				core_scsi3_pr_dump_type(pr_reg_type));
			return 1;
		} else {
			/*
			 * Allow non WRITE CDBs for all Write Exclusive
			 * PR TYPEs to pass for registered and
			 * non-registered_nexuxes NOT holding the reservation.
			 *
			 * We only make noise for the unregistered nexuses,
			 * as we expect registered non-reservation holding
			 * nexuses to issue CDBs.
			 */
#if 0
			if (!(registered_nexus)) {
				printk(KERN_INFO "Allowing implicit CDB: 0x%02x"
					" for %s reservation on unregistered"
					" nexus\n", cdb[0],
					core_scsi3_pr_dump_type(pr_reg_type));
			}
#endif
			return 0;
		}
	} else if ((reg_only) || (all_reg)) {
		if (registered_nexus) {
			/*
			 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
			 * allow commands from registered nexuses.
			 */
#if 0
			printk(KERN_INFO "Allowing implicit CDB: 0x%02x for %s"
				" reservation\n", cdb[0],
				core_scsi3_pr_dump_type(pr_reg_type));
#endif
			return 0;
		}
	}
	printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
		" for %s reservation\n", transport_dump_cmd_direction(cmd),
		(registered_nexus) ? "" : "un",
		se_sess->se_node_acl->initiatorname, cdb[0],
		core_scsi3_pr_dump_type(pr_reg_type));

	return 1; /* Conflict by default */
}
549
/*
 * Return the current PRGeneration value and post-increment it under
 * dev_reservation_lock.
 */
static u32 core_scsi3_pr_generation(struct se_device *dev)
{
	struct se_subsystem_dev *su_dev = SU_DEV(dev);
	u32 prg;
	/*
	 * PRGeneration field shall contain the value of a 32-bit wrapping
	 * counter maintained by the device server.
	 *
	 * Note that this is done regardless of Active Persist across
	 * Target PowerLoss (APTPL)
	 *
	 * See spc4r17 section 6.3.12 READ_KEYS service action
	 */
	spin_lock(&dev->dev_reservation_lock);
	prg = T10_RES(su_dev)->pr_generation++;
	spin_unlock(&dev->dev_reservation_lock);

	return prg;
}
569
/*
 * Check an incoming command against the active SPC-3 persistent
 * reservation (or legacy SPC-2 reservation) on the device.
 * Returns 0 = no conflict, -1 = caller's nexus is not the holder; on -1
 * due to an ISID mismatch only, bit 31 of *pr_reg_type is set so
 * core_scsi3_pr_seq_non_holder() can ignore the registration.
 */
static int core_scsi3_pr_reservation_check(
	struct se_cmd *cmd,
	u32 *pr_reg_type)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	int ret;

	if (!(sess))
		return 0;
	/*
	 * A legacy SPC-2 reservation is being held.
	 */
	if (dev->dev_flags & DF_SPC2_RESERVATIONS)
		return core_scsi2_reservation_check(cmd, pr_reg_type);

	spin_lock(&dev->dev_reservation_lock);
	if (!(dev->dev_pr_res_holder)) {
		spin_unlock(&dev->dev_reservation_lock);
		return 0;
	}
	/* Export the holder's type/key to the caller before deciding. */
	*pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
	cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
	if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
		spin_unlock(&dev->dev_reservation_lock);
		return -1;
	}
	if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
		spin_unlock(&dev->dev_reservation_lock);
		return 0;
	}
	ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
	       sess->sess_bin_isid) ? 0 : -1;
	/*
	 * Use bit in *pr_reg_type to notify ISID mismatch in
	 * core_scsi3_pr_seq_non_holder().
	 */
	if (ret != 0)
		*pr_reg_type |= 0x80000000;
	spin_unlock(&dev->dev_reservation_lock);

	return ret;
}
613
/*
 * Allocate and initialize one t10_pr_registration for the given nexus.
 * Uses GFP_ATOMIC — presumably callable under spinlock; confirm at call
 * sites.  Returns NULL on allocation failure.  Caller owns the result
 * (including pr_aptpl_buf).
 */
static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
	struct se_device *dev,
	struct se_node_acl *nacl,
	struct se_dev_entry *deve,
	unsigned char *isid,
	u64 sa_res_key,
	int all_tg_pt,
	int aptpl)
{
	struct se_subsystem_dev *su_dev = SU_DEV(dev);
	struct t10_pr_registration *pr_reg;

	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
	if (!(pr_reg)) {
		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
		return NULL;
	}

	/* Scratch buffer for APTPL metadata updates for this registration. */
	pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
					GFP_ATOMIC);
	if (!(pr_reg->pr_aptpl_buf)) {
		printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
		kmem_cache_free(t10_pr_reg_cache, pr_reg);
		return NULL;
	}

	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
	atomic_set(&pr_reg->pr_res_holders, 0);
	pr_reg->pr_reg_nacl = nacl;
	pr_reg->pr_reg_deve = deve;
	pr_reg->pr_res_mapped_lun = deve->mapped_lun;
	pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
	pr_reg->pr_res_key = sa_res_key;
	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
	pr_reg->pr_reg_aptpl = aptpl;
	pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
	/*
	 * If an ISID value for this SCSI Initiator Port exists,
	 * save it to the registration now.
	 */
	if (isid != NULL) {
		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
		pr_reg->isid_present_at_reg = 1;
	}

	return pr_reg;
}
666
667static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
668static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
669
670/*
671 * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
672 * modes.
673 */
674static struct t10_pr_registration *__core_scsi3_alloc_registration(
675 struct se_device *dev,
676 struct se_node_acl *nacl,
677 struct se_dev_entry *deve,
678 unsigned char *isid,
679 u64 sa_res_key,
680 int all_tg_pt,
681 int aptpl)
682{
683 struct se_dev_entry *deve_tmp;
684 struct se_node_acl *nacl_tmp;
685 struct se_port *port, *port_tmp;
686 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
687 struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
688 int ret;
689 /*
690 * Create a registration for the I_T Nexus upon which the
691 * PROUT REGISTER was received.
692 */
693 pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
694 sa_res_key, all_tg_pt, aptpl);
695 if (!(pr_reg))
696 return NULL;
697 /*
698 * Return pointer to pr_reg for ALL_TG_PT=0
699 */
700 if (!(all_tg_pt))
701 return pr_reg;
702 /*
703 * Create list of matching SCSI Initiator Port registrations
704 * for ALL_TG_PT=1
705 */
706 spin_lock(&dev->se_port_lock);
707 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
708 atomic_inc(&port->sep_tg_pt_ref_cnt);
709 smp_mb__after_atomic_inc();
710 spin_unlock(&dev->se_port_lock);
711
712 spin_lock_bh(&port->sep_alua_lock);
713 list_for_each_entry(deve_tmp, &port->sep_alua_list,
714 alua_port_list) {
715 /*
716 * This pointer will be NULL for demo mode MappedLUNs
717 * that have not been make explict via a ConfigFS
718 * MappedLUN group for the SCSI Initiator Node ACL.
719 */
720 if (!(deve_tmp->se_lun_acl))
721 continue;
722
723 nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
724 /*
725 * Skip the matching struct se_node_acl that is allocated
726 * above..
727 */
728 if (nacl == nacl_tmp)
729 continue;
730 /*
731 * Only perform PR registrations for target ports on
732 * the same fabric module as the REGISTER w/ ALL_TG_PT=1
733 * arrived.
734 */
735 if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
736 continue;
737 /*
738 * Look for a matching Initiator Node ACL in ASCII format
739 */
740 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
741 continue;
742
743 atomic_inc(&deve_tmp->pr_ref_count);
744 smp_mb__after_atomic_inc();
745 spin_unlock_bh(&port->sep_alua_lock);
746 /*
747 * Grab a configfs group dependency that is released
748 * for the exception path at label out: below, or upon
749 * completion of adding ALL_TG_PT=1 registrations in
750 * __core_scsi3_add_registration()
751 */
752 ret = core_scsi3_lunacl_depend_item(deve_tmp);
753 if (ret < 0) {
754 printk(KERN_ERR "core_scsi3_lunacl_depend"
755 "_item() failed\n");
756 atomic_dec(&port->sep_tg_pt_ref_cnt);
757 smp_mb__after_atomic_dec();
758 atomic_dec(&deve_tmp->pr_ref_count);
759 smp_mb__after_atomic_dec();
760 goto out;
761 }
762 /*
763 * Located a matching SCSI Initiator Port on a different
764 * port, allocate the pr_reg_atp and attach it to the
765 * pr_reg->pr_reg_atp_list that will be processed once
766 * the original *pr_reg is processed in
767 * __core_scsi3_add_registration()
768 */
769 pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
770 nacl_tmp, deve_tmp, NULL,
771 sa_res_key, all_tg_pt, aptpl);
772 if (!(pr_reg_atp)) {
773 atomic_dec(&port->sep_tg_pt_ref_cnt);
774 smp_mb__after_atomic_dec();
775 atomic_dec(&deve_tmp->pr_ref_count);
776 smp_mb__after_atomic_dec();
777 core_scsi3_lunacl_undepend_item(deve_tmp);
778 goto out;
779 }
780
781 list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
782 &pr_reg->pr_reg_atp_list);
783 spin_lock_bh(&port->sep_alua_lock);
784 }
785 spin_unlock_bh(&port->sep_alua_lock);
786
787 spin_lock(&dev->se_port_lock);
788 atomic_dec(&port->sep_tg_pt_ref_cnt);
789 smp_mb__after_atomic_dec();
790 }
791 spin_unlock(&dev->se_port_lock);
792
793 return pr_reg;
794out:
795 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
796 &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
797 list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
798 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
799 kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
800 }
801 kmem_cache_free(t10_pr_reg_cache, pr_reg);
802 return NULL;
803}
804
805int core_scsi3_alloc_aptpl_registration(
806 struct t10_reservation_template *pr_tmpl,
807 u64 sa_res_key,
808 unsigned char *i_port,
809 unsigned char *isid,
810 u32 mapped_lun,
811 unsigned char *t_port,
812 u16 tpgt,
813 u32 target_lun,
814 int res_holder,
815 int all_tg_pt,
816 u8 type)
817{
818 struct t10_pr_registration *pr_reg;
819
820 if (!(i_port) || !(t_port) || !(sa_res_key)) {
821 printk(KERN_ERR "Illegal parameters for APTPL registration\n");
822 return -1;
823 }
824
825 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
826 if (!(pr_reg)) {
827 printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
828 return -1;
829 }
830 pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
831
832 INIT_LIST_HEAD(&pr_reg->pr_reg_list);
833 INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
834 INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
835 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
836 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
837 atomic_set(&pr_reg->pr_res_holders, 0);
838 pr_reg->pr_reg_nacl = NULL;
839 pr_reg->pr_reg_deve = NULL;
840 pr_reg->pr_res_mapped_lun = mapped_lun;
841 pr_reg->pr_aptpl_target_lun = target_lun;
842 pr_reg->pr_res_key = sa_res_key;
843 pr_reg->pr_reg_all_tg_pt = all_tg_pt;
844 pr_reg->pr_reg_aptpl = 1;
845 pr_reg->pr_reg_tg_pt_lun = NULL;
846 pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
847 pr_reg->pr_res_type = type;
848 /*
849 * If an ISID value had been saved in APTPL metadata for this
850 * SCSI Initiator Port, restore it now.
851 */
852 if (isid != NULL) {
853 pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
854 snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
855 pr_reg->isid_present_at_reg = 1;
856 }
857 /*
858 * Copy the i_port and t_port information from caller.
859 */
860 snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
861 snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
862 pr_reg->pr_reg_tpgt = tpgt;
863 /*
864 * Set pr_res_holder from caller, the pr_reg who is the reservation
865 * holder will get it's pointer set in core_scsi3_aptpl_reserve() once
866 * the Initiator Node LUN ACL from the fabric module is created for
867 * this registration.
868 */
869 pr_reg->pr_res_holder = res_holder;
870
871 list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
872 printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
873 " metadata\n", (res_holder) ? "+reservation" : "");
874 return 0;
875}
876
877static void core_scsi3_aptpl_reserve(
878 struct se_device *dev,
879 struct se_portal_group *tpg,
880 struct se_node_acl *node_acl,
881 struct t10_pr_registration *pr_reg)
882{
883 char i_buf[PR_REG_ISID_ID_LEN];
884 int prf_isid;
885
886 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
887 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
888 PR_REG_ISID_ID_LEN);
889
890 spin_lock(&dev->dev_reservation_lock);
891 dev->dev_pr_res_holder = pr_reg;
892 spin_unlock(&dev->dev_reservation_lock);
893
894 printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
895 " new reservation holder TYPE: %s ALL_TG_PT: %d\n",
896 TPG_TFO(tpg)->get_fabric_name(),
897 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
898 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
899 printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
900 TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
901 (prf_isid) ? &i_buf[0] : "");
902}
903
904static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
905 struct t10_pr_registration *, int, int);
906
/*
 * Match the passed nacl/deve/lun against registrations restored from
 * APTPL metadata.  On a full match of initiator port, mapped LUN,
 * target port, tpgt and target LUN, bind the restored registration to
 * the live objects and add it via __core_scsi3_add_registration().
 * Always returns 0.
 */
static int __core_scsi3_check_aptpl_registration(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 target_lun,
	struct se_node_acl *nacl,
	struct se_dev_entry *deve)
{
	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
	unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
	unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
	u16 tpgt;

	memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
	memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
	/*
	 * Copy Initiator Port information from struct se_node_acl
	 */
	snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
	snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
			TPG_TFO(tpg)->tpg_get_wwn(tpg));
	tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
	/*
	 * Look for the matching registrations+reservation from those
	 * created from APTPL metadata. Note that multiple registrations
	 * may exist for fabrics that use ISIDs in their SCSI Initiator Port
	 * TransportIDs.
	 */
	spin_lock(&pr_tmpl->aptpl_reg_lock);
	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
				pr_reg_aptpl_list) {
		if (!(strcmp(pr_reg->pr_iport, i_port)) &&
		     (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
		    !(strcmp(pr_reg->pr_tport, t_port)) &&
		     (pr_reg->pr_reg_tpgt == tpgt) &&
		     (pr_reg->pr_aptpl_target_lun == target_lun)) {

			pr_reg->pr_reg_nacl = nacl;
			pr_reg->pr_reg_deve = deve;
			pr_reg->pr_reg_tg_pt_lun = lun;

			list_del(&pr_reg->pr_reg_aptpl_list);
			/*
			 * Drop aptpl_reg_lock across the registration setup
			 * below; it is re-taken before the list walk
			 * continues.
			 * NOTE(review): aptpl_reg_list may change while the
			 * lock is dropped — confirm that continuing the
			 * list_for_each_entry_safe() walk afterwards is safe.
			 */
			spin_unlock(&pr_tmpl->aptpl_reg_lock);
			/*
			 * At this point all of the pointers in *pr_reg will
			 * be setup, so go ahead and add the registration.
			 */

			__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
			/*
			 * If this registration is the reservation holder,
			 * make that happen now..
			 */
			if (pr_reg->pr_res_holder)
				core_scsi3_aptpl_reserve(dev, tpg,
						nacl, pr_reg);
			/*
			 * Reenable pr_aptpl_active to accept new metadata
			 * updates once the SCSI device is active again..
			 */
			spin_lock(&pr_tmpl->aptpl_reg_lock);
			pr_tmpl->pr_aptpl_active = 1;
		}
	}
	spin_unlock(&pr_tmpl->aptpl_reg_lock);

	return 0;
}
976
977int core_scsi3_check_aptpl_registration(
978 struct se_device *dev,
979 struct se_portal_group *tpg,
980 struct se_lun *lun,
981 struct se_lun_acl *lun_acl)
982{
983 struct se_subsystem_dev *su_dev = SU_DEV(dev);
984 struct se_node_acl *nacl = lun_acl->se_lun_nacl;
985 struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
986
987 if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
988 return 0;
989
990 return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
991 lun->unpacked_lun, nacl, deve);
992}
993
994static void __core_scsi3_dump_registration(
995 struct target_core_fabric_ops *tfo,
996 struct se_device *dev,
997 struct se_node_acl *nacl,
998 struct t10_pr_registration *pr_reg,
999 int register_type)
1000{
1001 struct se_portal_group *se_tpg = nacl->se_tpg;
1002 char i_buf[PR_REG_ISID_ID_LEN];
1003 int prf_isid;
1004
1005 memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
1006 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1007 PR_REG_ISID_ID_LEN);
1008
1009 printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
1010 " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
1011 "_AND_MOVE" : (register_type == 1) ?
1012 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
1013 (prf_isid) ? i_buf : "");
1014 printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
1015 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
1016 tfo->tpg_get_tag(se_tpg));
1017 printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1018 " Port(s)\n", tfo->get_fabric_name(),
1019 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1020 TRANSPORT(dev)->name);
1021 printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1022 " 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
1023 pr_reg->pr_res_key, pr_reg->pr_res_generation,
1024 pr_reg->pr_reg_aptpl);
1025}
1026
1027/*
1028 * this function can be called with struct se_device->dev_reservation_lock
1029 * when register_move = 1
1030 */
static void __core_scsi3_add_registration(
	struct se_device *dev,
	struct se_node_acl *nacl,
	struct t10_pr_registration *pr_reg,
	int register_type,
	int register_move)
{
	struct se_subsystem_dev *su_dev = SU_DEV(dev);
	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;

	/*
	 * Increment PRgeneration counter for struct se_device upon a successful
	 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
	 *
	 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
	 * action, the struct se_device->dev_reservation_lock will already be held,
	 * so we do not call core_scsi3_pr_generation() which grabs the lock
	 * for the REGISTER.
	 */
	pr_reg->pr_res_generation = (register_move) ?
			T10_RES(su_dev)->pr_generation++ :
			core_scsi3_pr_generation(dev);

	/*
	 * Publish the registration on the device's registration list and
	 * mark the backing se_dev_entry as PR registered.
	 */
	spin_lock(&pr_tmpl->registration_lock);
	list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
	pr_reg->pr_reg_deve->def_pr_registered = 1;

	__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
	spin_unlock(&pr_tmpl->registration_lock);
	/*
	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
	 */
	if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
		return;
	/*
	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
	 * allocated in __core_scsi3_alloc_registration()
	 */
	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);

		/* Each sibling registration gets its own PRgeneration. */
		pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);

		spin_lock(&pr_tmpl->registration_lock);
		list_add_tail(&pr_reg_tmp->pr_reg_list,
				&pr_tmpl->registration_list);
		pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;

		__core_scsi3_dump_registration(tfo, dev,
				pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
				register_type);
		spin_unlock(&pr_tmpl->registration_lock);
		/*
		 * Drop configfs group dependency reference from
		 * __core_scsi3_alloc_registration()
		 */
		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
	}
}
1093
1094static int core_scsi3_alloc_registration(
1095 struct se_device *dev,
1096 struct se_node_acl *nacl,
1097 struct se_dev_entry *deve,
1098 unsigned char *isid,
1099 u64 sa_res_key,
1100 int all_tg_pt,
1101 int aptpl,
1102 int register_type,
1103 int register_move)
1104{
1105 struct t10_pr_registration *pr_reg;
1106
1107 pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
1108 sa_res_key, all_tg_pt, aptpl);
1109 if (!(pr_reg))
1110 return -1;
1111
1112 __core_scsi3_add_registration(dev, nacl, pr_reg,
1113 register_type, register_move);
1114 return 0;
1115}
1116
1117static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1118 struct se_device *dev,
1119 struct se_node_acl *nacl,
1120 unsigned char *isid)
1121{
1122 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
1123 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
1124 struct se_portal_group *tpg;
1125
1126 spin_lock(&pr_tmpl->registration_lock);
1127 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1128 &pr_tmpl->registration_list, pr_reg_list) {
1129 /*
1130 * First look for a matching struct se_node_acl
1131 */
1132 if (pr_reg->pr_reg_nacl != nacl)
1133 continue;
1134
1135 tpg = pr_reg->pr_reg_nacl->se_tpg;
1136 /*
1137 * If this registration does NOT contain a fabric provided
1138 * ISID, then we have found a match.
1139 */
1140 if (!(pr_reg->isid_present_at_reg)) {
1141 /*
1142 * Determine if this SCSI device server requires that
1143 * SCSI Intiatior TransportID w/ ISIDs is enforced
1144 * for fabric modules (iSCSI) requiring them.
1145 */
1146 if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
1147 if (DEV_ATTRIB(dev)->enforce_pr_isids)
1148 continue;
1149 }
1150 atomic_inc(&pr_reg->pr_res_holders);
1151 smp_mb__after_atomic_inc();
1152 spin_unlock(&pr_tmpl->registration_lock);
1153 return pr_reg;
1154 }
1155 /*
1156 * If the *pr_reg contains a fabric defined ISID for multi-value
1157 * SCSI Initiator Port TransportIDs, then we expect a valid
1158 * matching ISID to be provided by the local SCSI Initiator Port.
1159 */
1160 if (!(isid))
1161 continue;
1162 if (strcmp(isid, pr_reg->pr_reg_isid))
1163 continue;
1164
1165 atomic_inc(&pr_reg->pr_res_holders);
1166 smp_mb__after_atomic_inc();
1167 spin_unlock(&pr_tmpl->registration_lock);
1168 return pr_reg;
1169 }
1170 spin_unlock(&pr_tmpl->registration_lock);
1171
1172 return NULL;
1173}
1174
1175static struct t10_pr_registration *core_scsi3_locate_pr_reg(
1176 struct se_device *dev,
1177 struct se_node_acl *nacl,
1178 struct se_session *sess)
1179{
1180 struct se_portal_group *tpg = nacl->se_tpg;
1181 unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
1182
1183 if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
1184 memset(&buf[0], 0, PR_REG_ISID_LEN);
1185 TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
1186 PR_REG_ISID_LEN);
1187 isid_ptr = &buf[0];
1188 }
1189
1190 return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
1191}
1192
/*
 * Drop a *pr_reg reference taken by __core_scsi3_locate_pr_reg().
 */
static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
{
	atomic_dec(&pr_reg->pr_res_holders);
	smp_mb__after_atomic_dec();
}
1198
/*
 * Check whether an UNREGISTER of *pr_reg must also perform an implict
 * RELEASE of the SPC-3 reservation.
 *
 * Returns 1 if *pr_reg held the reservation and it was released,
 * 0 if *pr_reg is not the reservation holder, and -1 for the rejected
 * ALL_TG_PT=1 case where a different I_T nexus holds a reservation with
 * a matching key.
 */
static int core_scsi3_check_implict_release(
	struct se_device *dev,
	struct t10_pr_registration *pr_reg)
{
	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
	struct t10_pr_registration *pr_res_holder;
	int ret = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_res_holder = dev->dev_pr_res_holder;
	if (!(pr_res_holder)) {
		/* No reservation is held; nothing to release. */
		spin_unlock(&dev->dev_reservation_lock);
		return ret;
	}
	if (pr_res_holder == pr_reg) {
		/*
		 * Perform an implict RELEASE if the registration that
		 * is being released is holding the reservation.
		 *
		 * From spc4r17, section 5.7.11.1:
		 *
		 * e) If the I_T nexus is the persistent reservation holder
		 *    and the persistent reservation is not an all registrants
		 *    type, then a PERSISTENT RESERVE OUT command with REGISTER
		 *    service action or REGISTER AND IGNORE EXISTING KEY
		 *    service action with the SERVICE ACTION RESERVATION KEY
		 *    field set to zero (see 5.7.11.3).
		 */
		__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
		ret = 1;
		/*
		 * For 'All Registrants' reservation types, all existing
		 * registrations are still processed as reservation holders
		 * in core_scsi3_pr_seq_non_holder() after the initial
		 * reservation holder is implictly released here.
		 */
	} else if (pr_reg->pr_reg_all_tg_pt &&
		  (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
			  pr_reg->pr_reg_nacl->initiatorname)) &&
		  (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
		printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
			" UNREGISTER while existing reservation with matching"
			" key 0x%016Lx is present from another SCSI Initiator"
			" Port\n", pr_reg->pr_res_key);
		ret = -1;
	}
	spin_unlock(&dev->dev_reservation_lock);

	return ret;
}
1249
1250/*
1251 * Called with struct t10_reservation_template->registration_lock held.
1252 */
/*
 * Unlink *pr_reg from the device's registration list and free it, or —
 * for PREEMPT_AND_ABORT — move it onto the caller's abort list for
 * deferred release.
 *
 * Called with struct t10_reservation_template->registration_lock held.
 */
static void __core_scsi3_free_registration(
	struct se_device *dev,
	struct t10_pr_registration *pr_reg,
	struct list_head *preempt_and_abort_list,
	int dec_holders)
{
	struct target_core_fabric_ops *tfo =
			pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
	char i_buf[PR_REG_ISID_ID_LEN];
	int prf_isid;

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
				PR_REG_ISID_ID_LEN);

	pr_reg->pr_reg_deve->def_pr_registered = 0;
	pr_reg->pr_reg_deve->pr_res_key = 0;
	list_del(&pr_reg->pr_reg_list);
	/*
	 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
	 * so call core_scsi3_put_pr_reg() to decrement our reference.
	 */
	if (dec_holders)
		core_scsi3_put_pr_reg(pr_reg);
	/*
	 * Wait until all reference from any other I_T nexuses for this
	 * *pr_reg have been released. Because list_del() is called above,
	 * the last core_scsi3_put_pr_reg(pr_reg) will release this reference
	 * count back to zero, and we release *pr_reg.
	 */
	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
		/*
		 * Briefly drop registration_lock while spinning so that
		 * the holders of the remaining references can take it and
		 * drop them.
		 */
		spin_unlock(&pr_tmpl->registration_lock);
		printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
				tfo->get_fabric_name());
		cpu_relax();
		spin_lock(&pr_tmpl->registration_lock);
	}

	printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
		" Node: %s%s\n", tfo->get_fabric_name(),
		pr_reg->pr_reg_nacl->initiatorname,
		(prf_isid) ? &i_buf[0] : "");
	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
		" Port(s)\n", tfo->get_fabric_name(),
		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
		TRANSPORT(dev)->name);
	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
		" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
		pr_reg->pr_res_generation);

	if (!(preempt_and_abort_list)) {
		pr_reg->pr_reg_deve = NULL;
		pr_reg->pr_reg_nacl = NULL;
		kfree(pr_reg->pr_aptpl_buf);
		kmem_cache_free(t10_pr_reg_cache, pr_reg);
		return;
	}
	/*
	 * For PREEMPT_AND_ABORT, the list of *pr_reg in preempt_and_abort_list
	 * are released once the ABORT_TASK_SET has completed..
	 */
	list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
}
1317
1318void core_scsi3_free_pr_reg_from_nacl(
1319 struct se_device *dev,
1320 struct se_node_acl *nacl)
1321{
1322 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
1323 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1324 /*
1325 * If the passed se_node_acl matches the reservation holder,
1326 * release the reservation.
1327 */
1328 spin_lock(&dev->dev_reservation_lock);
1329 pr_res_holder = dev->dev_pr_res_holder;
1330 if ((pr_res_holder != NULL) &&
1331 (pr_res_holder->pr_reg_nacl == nacl))
1332 __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
1333 spin_unlock(&dev->dev_reservation_lock);
1334 /*
1335 * Release any registration associated with the struct se_node_acl.
1336 */
1337 spin_lock(&pr_tmpl->registration_lock);
1338 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1339 &pr_tmpl->registration_list, pr_reg_list) {
1340
1341 if (pr_reg->pr_reg_nacl != nacl)
1342 continue;
1343
1344 __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
1345 }
1346 spin_unlock(&pr_tmpl->registration_lock);
1347}
1348
1349void core_scsi3_free_all_registrations(
1350 struct se_device *dev)
1351{
1352 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
1353 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1354
1355 spin_lock(&dev->dev_reservation_lock);
1356 pr_res_holder = dev->dev_pr_res_holder;
1357 if (pr_res_holder != NULL) {
1358 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
1359 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
1360 pr_res_holder, 0);
1361 }
1362 spin_unlock(&dev->dev_reservation_lock);
1363
1364 spin_lock(&pr_tmpl->registration_lock);
1365 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1366 &pr_tmpl->registration_list, pr_reg_list) {
1367
1368 __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
1369 }
1370 spin_unlock(&pr_tmpl->registration_lock);
1371
1372 spin_lock(&pr_tmpl->aptpl_reg_lock);
1373 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
1374 pr_reg_aptpl_list) {
1375 list_del(&pr_reg->pr_reg_aptpl_list);
1376 kfree(pr_reg->pr_aptpl_buf);
1377 kmem_cache_free(t10_pr_reg_cache, pr_reg);
1378 }
1379 spin_unlock(&pr_tmpl->aptpl_reg_lock);
1380}
1381
/*
 * Take a configfs dependency on the TPG's group item so it cannot be
 * removed while PR logic references it.  Paired with
 * core_scsi3_tpg_undepend_item().
 */
static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
{
	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
			&tpg->tpg_group.cg_item);
}
1387
/*
 * Drop the configfs dependency taken by core_scsi3_tpg_depend_item()
 * and release the PR reference on the TPG.
 */
static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
{
	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
			&tpg->tpg_group.cg_item);

	atomic_dec(&tpg->tpg_pr_ref_count);
	smp_mb__after_atomic_dec();
}
1396
1397static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
1398{
1399 struct se_portal_group *tpg = nacl->se_tpg;
1400
1401 if (nacl->dynamic_node_acl)
1402 return 0;
1403
1404 return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
1405 &nacl->acl_group.cg_item);
1406}
1407
1408static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1409{
1410 struct se_portal_group *tpg = nacl->se_tpg;
1411
1412 if (nacl->dynamic_node_acl) {
1413 atomic_dec(&nacl->acl_pr_ref_count);
1414 smp_mb__after_atomic_dec();
1415 return;
1416 }
1417
1418 configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
1419 &nacl->acl_group.cg_item);
1420
1421 atomic_dec(&nacl->acl_pr_ref_count);
1422 smp_mb__after_atomic_dec();
1423}
1424
1425static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1426{
1427 struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
1428 struct se_node_acl *nacl;
1429 struct se_portal_group *tpg;
1430 /*
1431 * For nacl->dynamic_node_acl=1
1432 */
1433 if (!(lun_acl))
1434 return 0;
1435
1436 nacl = lun_acl->se_lun_nacl;
1437 tpg = nacl->se_tpg;
1438
1439 return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
1440 &lun_acl->se_lun_group.cg_item);
1441}
1442
1443static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1444{
1445 struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
1446 struct se_node_acl *nacl;
1447 struct se_portal_group *tpg;
1448 /*
1449 * For nacl->dynamic_node_acl=1
1450 */
1451 if (!(lun_acl)) {
1452 atomic_dec(&se_deve->pr_ref_count);
1453 smp_mb__after_atomic_dec();
1454 return;
1455 }
1456 nacl = lun_acl->se_lun_nacl;
1457 tpg = nacl->se_tpg;
1458
1459 configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
1460 &lun_acl->se_lun_group.cg_item);
1461
1462 atomic_dec(&se_deve->pr_ref_count);
1463 smp_mb__after_atomic_dec();
1464}
1465
1466static int core_scsi3_decode_spec_i_port(
1467 struct se_cmd *cmd,
1468 struct se_portal_group *tpg,
1469 unsigned char *l_isid,
1470 u64 sa_res_key,
1471 int all_tg_pt,
1472 int aptpl)
1473{
1474 struct se_device *dev = SE_DEV(cmd);
1475 struct se_port *tmp_port;
1476 struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
1477 struct se_session *se_sess = SE_SESS(cmd);
1478 struct se_node_acl *dest_node_acl = NULL;
1479 struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
1480 struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
1481 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
1482 struct list_head tid_dest_list;
1483 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
1484 struct target_core_fabric_ops *tmp_tf_ops;
1485 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
1486 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
1487 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
1488 u32 tpdl, tid_len = 0;
1489 int ret, dest_local_nexus, prf_isid;
1490 u32 dest_rtpi = 0;
1491
1492 memset(dest_iport, 0, 64);
1493 INIT_LIST_HEAD(&tid_dest_list);
1494
1495 local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
1496 /*
1497 * Allocate a struct pr_transport_id_holder and setup the
1498 * local_node_acl and local_se_deve pointers and add to
1499 * struct list_head tid_dest_list for add registration
1500 * processing in the loop of tid_dest_list below.
1501 */
1502 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
1503 if (!(tidh_new)) {
1504 printk(KERN_ERR "Unable to allocate tidh_new\n");
1505 return PYX_TRANSPORT_LU_COMM_FAILURE;
1506 }
1507 INIT_LIST_HEAD(&tidh_new->dest_list);
1508 tidh_new->dest_tpg = tpg;
1509 tidh_new->dest_node_acl = se_sess->se_node_acl;
1510 tidh_new->dest_se_deve = local_se_deve;
1511
1512 local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
1513 se_sess->se_node_acl, local_se_deve, l_isid,
1514 sa_res_key, all_tg_pt, aptpl);
1515 if (!(local_pr_reg)) {
1516 kfree(tidh_new);
1517 return PYX_TRANSPORT_LU_COMM_FAILURE;
1518 }
1519 tidh_new->dest_pr_reg = local_pr_reg;
1520 /*
1521 * The local I_T nexus does not hold any configfs dependances,
1522 * so we set tid_h->dest_local_nexus=1 to prevent the
1523 * configfs_undepend_item() calls in the tid_dest_list loops below.
1524 */
1525 tidh_new->dest_local_nexus = 1;
1526 list_add_tail(&tidh_new->dest_list, &tid_dest_list);
1527 /*
1528 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
1529 * first extract TransportID Parameter Data Length, and make sure
1530 * the value matches up to the SCSI expected data transfer length.
1531 */
1532 tpdl = (buf[24] & 0xff) << 24;
1533 tpdl |= (buf[25] & 0xff) << 16;
1534 tpdl |= (buf[26] & 0xff) << 8;
1535 tpdl |= buf[27] & 0xff;
1536
1537 if ((tpdl + 28) != cmd->data_length) {
1538 printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
1539 " does not equal CDB data_length: %u\n", tpdl,
1540 cmd->data_length);
1541 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1542 goto out;
1543 }
1544 /*
1545 * Start processing the received transport IDs using the
1546 * receiving I_T Nexus portal's fabric dependent methods to
1547 * obtain the SCSI Initiator Port/Device Identifiers.
1548 */
1549 ptr = &buf[28];
1550
1551 while (tpdl > 0) {
1552 proto_ident = (ptr[0] & 0x0f);
1553 dest_tpg = NULL;
1554
1555 spin_lock(&dev->se_port_lock);
1556 list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
1557 tmp_tpg = tmp_port->sep_tpg;
1558 if (!(tmp_tpg))
1559 continue;
1560 tmp_tf_ops = TPG_TFO(tmp_tpg);
1561 if (!(tmp_tf_ops))
1562 continue;
1563 if (!(tmp_tf_ops->get_fabric_proto_ident) ||
1564 !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
1565 continue;
1566 /*
1567 * Look for the matching proto_ident provided by
1568 * the received TransportID
1569 */
1570 tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
1571 if (tmp_proto_ident != proto_ident)
1572 continue;
1573 dest_rtpi = tmp_port->sep_rtpi;
1574
1575 i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
1576 tmp_tpg, (const char *)ptr, &tid_len,
1577 &iport_ptr);
1578 if (!(i_str))
1579 continue;
1580
1581 atomic_inc(&tmp_tpg->tpg_pr_ref_count);
1582 smp_mb__after_atomic_inc();
1583 spin_unlock(&dev->se_port_lock);
1584
1585 ret = core_scsi3_tpg_depend_item(tmp_tpg);
1586 if (ret != 0) {
1587 printk(KERN_ERR " core_scsi3_tpg_depend_item()"
1588 " for tmp_tpg\n");
1589 atomic_dec(&tmp_tpg->tpg_pr_ref_count);
1590 smp_mb__after_atomic_dec();
1591 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
1592 goto out;
1593 }
1594 /*
1595 * Locate the desination initiator ACL to be registered
1596 * from the decoded fabric module specific TransportID
1597 * at *i_str.
1598 */
1599 spin_lock_bh(&tmp_tpg->acl_node_lock);
1600 dest_node_acl = __core_tpg_get_initiator_node_acl(
1601 tmp_tpg, i_str);
1602 if (dest_node_acl) {
1603 atomic_inc(&dest_node_acl->acl_pr_ref_count);
1604 smp_mb__after_atomic_inc();
1605 }
1606 spin_unlock_bh(&tmp_tpg->acl_node_lock);
1607
1608 if (!(dest_node_acl)) {
1609 core_scsi3_tpg_undepend_item(tmp_tpg);
1610 spin_lock(&dev->se_port_lock);
1611 continue;
1612 }
1613
1614 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
1615 if (ret != 0) {
1616 printk(KERN_ERR "configfs_depend_item() failed"
1617 " for dest_node_acl->acl_group\n");
1618 atomic_dec(&dest_node_acl->acl_pr_ref_count);
1619 smp_mb__after_atomic_dec();
1620 core_scsi3_tpg_undepend_item(tmp_tpg);
1621 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
1622 goto out;
1623 }
1624
1625 dest_tpg = tmp_tpg;
1626 printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
1627 " %s Port RTPI: %hu\n",
1628 TPG_TFO(dest_tpg)->get_fabric_name(),
1629 dest_node_acl->initiatorname, dest_rtpi);
1630
1631 spin_lock(&dev->se_port_lock);
1632 break;
1633 }
1634 spin_unlock(&dev->se_port_lock);
1635
1636 if (!(dest_tpg)) {
1637 printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
1638 " dest_tpg\n");
1639 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1640 goto out;
1641 }
1642#if 0
1643 printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
1644 " tid_len: %d for %s + %s\n",
1645 TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
1646 tpdl, tid_len, i_str, iport_ptr);
1647#endif
1648 if (tid_len > tpdl) {
1649 printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
1650 " %u for Transport ID: %s\n", tid_len, ptr);
1651 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1652 core_scsi3_tpg_undepend_item(dest_tpg);
1653 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1654 goto out;
1655 }
1656 /*
1657 * Locate the desintation struct se_dev_entry pointer for matching
1658 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
1659 * Target Port.
1660 */
1661 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
1662 dest_rtpi);
1663 if (!(dest_se_deve)) {
1664 printk(KERN_ERR "Unable to locate %s dest_se_deve"
1665 " from destination RTPI: %hu\n",
1666 TPG_TFO(dest_tpg)->get_fabric_name(),
1667 dest_rtpi);
1668
1669 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1670 core_scsi3_tpg_undepend_item(dest_tpg);
1671 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1672 goto out;
1673 }
1674
1675 ret = core_scsi3_lunacl_depend_item(dest_se_deve);
1676 if (ret < 0) {
1677 printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
1678 " failed\n");
1679 atomic_dec(&dest_se_deve->pr_ref_count);
1680 smp_mb__after_atomic_dec();
1681 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1682 core_scsi3_tpg_undepend_item(dest_tpg);
1683 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
1684 goto out;
1685 }
1686#if 0
1687 printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
1688 " dest_se_deve mapped_lun: %u\n",
1689 TPG_TFO(dest_tpg)->get_fabric_name(),
1690 dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
1691#endif
1692 /*
1693 * Skip any TransportIDs that already have a registration for
1694 * this target port.
1695 */
1696 pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
1697 iport_ptr);
1698 if (pr_reg_e) {
1699 core_scsi3_put_pr_reg(pr_reg_e);
1700 core_scsi3_lunacl_undepend_item(dest_se_deve);
1701 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1702 core_scsi3_tpg_undepend_item(dest_tpg);
1703 ptr += tid_len;
1704 tpdl -= tid_len;
1705 tid_len = 0;
1706 continue;
1707 }
1708 /*
1709 * Allocate a struct pr_transport_id_holder and setup
1710 * the dest_node_acl and dest_se_deve pointers for the
1711 * loop below.
1712 */
1713 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
1714 GFP_KERNEL);
1715 if (!(tidh_new)) {
1716 printk(KERN_ERR "Unable to allocate tidh_new\n");
1717 core_scsi3_lunacl_undepend_item(dest_se_deve);
1718 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1719 core_scsi3_tpg_undepend_item(dest_tpg);
1720 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
1721 goto out;
1722 }
1723 INIT_LIST_HEAD(&tidh_new->dest_list);
1724 tidh_new->dest_tpg = dest_tpg;
1725 tidh_new->dest_node_acl = dest_node_acl;
1726 tidh_new->dest_se_deve = dest_se_deve;
1727
1728 /*
1729 * Allocate, but do NOT add the registration for the
1730 * TransportID referenced SCSI Initiator port. This
1731 * done because of the following from spc4r17 in section
1732 * 6.14.3 wrt SPEC_I_PT:
1733 *
1734 * "If a registration fails for any initiator port (e.g., if th
1735 * logical unit does not have enough resources available to
1736 * hold the registration information), no registrations shall be
1737 * made, and the command shall be terminated with
1738 * CHECK CONDITION status."
1739 *
1740 * That means we call __core_scsi3_alloc_registration() here,
1741 * and then call __core_scsi3_add_registration() in the
1742 * 2nd loop which will never fail.
1743 */
1744 dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
1745 dest_node_acl, dest_se_deve, iport_ptr,
1746 sa_res_key, all_tg_pt, aptpl);
1747 if (!(dest_pr_reg)) {
1748 core_scsi3_lunacl_undepend_item(dest_se_deve);
1749 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1750 core_scsi3_tpg_undepend_item(dest_tpg);
1751 kfree(tidh_new);
1752 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
1753 goto out;
1754 }
1755 tidh_new->dest_pr_reg = dest_pr_reg;
1756 list_add_tail(&tidh_new->dest_list, &tid_dest_list);
1757
1758 ptr += tid_len;
1759 tpdl -= tid_len;
1760 tid_len = 0;
1761
1762 }
1763 /*
1764 * Go ahead and create a registrations from tid_dest_list for the
1765 * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
1766 * and dest_se_deve.
1767 *
1768 * The SA Reservation Key from the PROUT is set for the
1769 * registration, and ALL_TG_PT is also passed. ALL_TG_PT=1
1770 * means that the TransportID Initiator port will be
1771 * registered on all of the target ports in the SCSI target device
1772 * ALL_TG_PT=0 means the registration will only be for the
1773 * SCSI target port the PROUT REGISTER with SPEC_I_PT=1
1774 * was received.
1775 */
1776 list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
1777 dest_tpg = tidh->dest_tpg;
1778 dest_node_acl = tidh->dest_node_acl;
1779 dest_se_deve = tidh->dest_se_deve;
1780 dest_pr_reg = tidh->dest_pr_reg;
1781 dest_local_nexus = tidh->dest_local_nexus;
1782
1783 list_del(&tidh->dest_list);
1784 kfree(tidh);
1785
1786 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1787 prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
1788 PR_REG_ISID_ID_LEN);
1789
1790 __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
1791 dest_pr_reg, 0, 0);
1792
1793 printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
1794 " registered Transport ID for Node: %s%s Mapped LUN:"
1795 " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
1796 dest_node_acl->initiatorname, (prf_isid) ?
1797 &i_buf[0] : "", dest_se_deve->mapped_lun);
1798
1799 if (dest_local_nexus)
1800 continue;
1801
1802 core_scsi3_lunacl_undepend_item(dest_se_deve);
1803 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1804 core_scsi3_tpg_undepend_item(dest_tpg);
1805 }
1806
1807 return 0;
1808out:
1809 /*
1810 * For the failure case, release everything from tid_dest_list
1811 * including *dest_pr_reg and the configfs dependances..
1812 */
1813 list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
1814 dest_tpg = tidh->dest_tpg;
1815 dest_node_acl = tidh->dest_node_acl;
1816 dest_se_deve = tidh->dest_se_deve;
1817 dest_pr_reg = tidh->dest_pr_reg;
1818 dest_local_nexus = tidh->dest_local_nexus;
1819
1820 list_del(&tidh->dest_list);
1821 kfree(tidh);
1822 /*
1823 * Release any extra ALL_TG_PT=1 registrations for
1824 * the SPEC_I_PT=1 case.
1825 */
1826 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
1827 &dest_pr_reg->pr_reg_atp_list,
1828 pr_reg_atp_mem_list) {
1829 list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
1830 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
1831 kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
1832 }
1833
1834 kfree(dest_pr_reg->pr_aptpl_buf);
1835 kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
1836
1837 if (dest_local_nexus)
1838 continue;
1839
1840 core_scsi3_lunacl_undepend_item(dest_se_deve);
1841 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1842 core_scsi3_tpg_undepend_item(dest_tpg);
1843 }
1844 return ret;
1845}
1846
1847/*
1848 * Called with struct se_device->dev_reservation_lock held
1849 */
1850static int __core_scsi3_update_aptpl_buf(
1851 struct se_device *dev,
1852 unsigned char *buf,
1853 u32 pr_aptpl_buf_len,
1854 int clear_aptpl_metadata)
1855{
1856 struct se_lun *lun;
1857 struct se_portal_group *tpg;
1858 struct se_subsystem_dev *su_dev = SU_DEV(dev);
1859 struct t10_pr_registration *pr_reg;
1860 unsigned char tmp[512], isid_buf[32];
1861 ssize_t len = 0;
1862 int reg_count = 0;
1863
1864 memset(buf, 0, pr_aptpl_buf_len);
1865 /*
1866 * Called to clear metadata once APTPL has been deactivated.
1867 */
1868 if (clear_aptpl_metadata) {
1869 snprintf(buf, pr_aptpl_buf_len,
1870 "No Registrations or Reservations\n");
1871 return 0;
1872 }
1873 /*
1874 * Walk the registration list..
1875 */
1876 spin_lock(&T10_RES(su_dev)->registration_lock);
1877 list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
1878 pr_reg_list) {
1879
1880 tmp[0] = '\0';
1881 isid_buf[0] = '\0';
1882 tpg = pr_reg->pr_reg_nacl->se_tpg;
1883 lun = pr_reg->pr_reg_tg_pt_lun;
1884 /*
1885 * Write out any ISID value to APTPL metadata that was included
1886 * in the original registration.
1887 */
1888 if (pr_reg->isid_present_at_reg)
1889 snprintf(isid_buf, 32, "initiator_sid=%s\n",
1890 pr_reg->pr_reg_isid);
1891 /*
1892 * Include special metadata if the pr_reg matches the
1893 * reservation holder.
1894 */
1895 if (dev->dev_pr_res_holder == pr_reg) {
1896 snprintf(tmp, 512, "PR_REG_START: %d"
1897 "\ninitiator_fabric=%s\n"
1898 "initiator_node=%s\n%s"
1899 "sa_res_key=%llu\n"
1900 "res_holder=1\nres_type=%02x\n"
1901 "res_scope=%02x\nres_all_tg_pt=%d\n"
1902 "mapped_lun=%u\n", reg_count,
1903 TPG_TFO(tpg)->get_fabric_name(),
1904 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1905 pr_reg->pr_res_key, pr_reg->pr_res_type,
1906 pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
1907 pr_reg->pr_res_mapped_lun);
1908 } else {
1909 snprintf(tmp, 512, "PR_REG_START: %d\n"
1910 "initiator_fabric=%s\ninitiator_node=%s\n%s"
1911 "sa_res_key=%llu\nres_holder=0\n"
1912 "res_all_tg_pt=%d\nmapped_lun=%u\n",
1913 reg_count, TPG_TFO(tpg)->get_fabric_name(),
1914 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1915 pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
1916 pr_reg->pr_res_mapped_lun);
1917 }
1918
1919 if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
1920 printk(KERN_ERR "Unable to update renaming"
1921 " APTPL metadata\n");
1922 spin_unlock(&T10_RES(su_dev)->registration_lock);
1923 return -1;
1924 }
1925 len += sprintf(buf+len, "%s", tmp);
1926
1927 /*
1928 * Include information about the associated SCSI target port.
1929 */
1930 snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
1931 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
1932 " %d\n", TPG_TFO(tpg)->get_fabric_name(),
1933 TPG_TFO(tpg)->tpg_get_wwn(tpg),
1934 TPG_TFO(tpg)->tpg_get_tag(tpg),
1935 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
1936
1937 if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
1938 printk(KERN_ERR "Unable to update renaming"
1939 " APTPL metadata\n");
1940 spin_unlock(&T10_RES(su_dev)->registration_lock);
1941 return -1;
1942 }
1943 len += sprintf(buf+len, "%s", tmp);
1944 reg_count++;
1945 }
1946 spin_unlock(&T10_RES(su_dev)->registration_lock);
1947
1948 if (!(reg_count))
1949 len += sprintf(buf+len, "No Registrations or Reservations");
1950
1951 return 0;
1952}
1953
1954static int core_scsi3_update_aptpl_buf(
1955 struct se_device *dev,
1956 unsigned char *buf,
1957 u32 pr_aptpl_buf_len,
1958 int clear_aptpl_metadata)
1959{
1960 int ret;
1961
1962 spin_lock(&dev->dev_reservation_lock);
1963 ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
1964 clear_aptpl_metadata);
1965 spin_unlock(&dev->dev_reservation_lock);
1966
1967 return ret;
1968}
1969
1970/*
1971 * Called with struct se_device->aptpl_file_mutex held
1972 */
1973static int __core_scsi3_write_aptpl_to_file(
1974 struct se_device *dev,
1975 unsigned char *buf,
1976 u32 pr_aptpl_buf_len)
1977{
1978 struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
1979 struct file *file;
1980 struct iovec iov[1];
1981 mm_segment_t old_fs;
1982 int flags = O_RDWR | O_CREAT | O_TRUNC;
1983 char path[512];
1984 int ret;
1985
1986 memset(iov, 0, sizeof(struct iovec));
1987 memset(path, 0, 512);
1988
1989 if (strlen(&wwn->unit_serial[0]) > 512) {
1990 printk(KERN_ERR "WWN value for struct se_device does not fit"
1991 " into path buffer\n");
1992 return -1;
1993 }
1994
1995 snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
1996 file = filp_open(path, flags, 0600);
1997 if (IS_ERR(file) || !file || !file->f_dentry) {
1998 printk(KERN_ERR "filp_open(%s) for APTPL metadata"
1999 " failed\n", path);
2000 return -1;
2001 }
2002
2003 iov[0].iov_base = &buf[0];
2004 if (!(pr_aptpl_buf_len))
2005 iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
2006 else
2007 iov[0].iov_len = pr_aptpl_buf_len;
2008
2009 old_fs = get_fs();
2010 set_fs(get_ds());
2011 ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
2012 set_fs(old_fs);
2013
2014 if (ret < 0) {
2015 printk("Error writing APTPL metadata file: %s\n", path);
2016 filp_close(file, NULL);
2017 return -1;
2018 }
2019 filp_close(file, NULL);
2020
2021 return 0;
2022}
2023
2024static int core_scsi3_update_and_write_aptpl(
2025 struct se_device *dev,
2026 unsigned char *in_buf,
2027 u32 in_pr_aptpl_buf_len)
2028{
2029 unsigned char null_buf[64], *buf;
2030 u32 pr_aptpl_buf_len;
2031 int ret, clear_aptpl_metadata = 0;
2032 /*
2033 * Can be called with a NULL pointer from PROUT service action CLEAR
2034 */
2035 if (!(in_buf)) {
2036 memset(null_buf, 0, 64);
2037 buf = &null_buf[0];
2038 /*
2039 * This will clear the APTPL metadata to:
2040 * "No Registrations or Reservations" status
2041 */
2042 pr_aptpl_buf_len = 64;
2043 clear_aptpl_metadata = 1;
2044 } else {
2045 buf = in_buf;
2046 pr_aptpl_buf_len = in_pr_aptpl_buf_len;
2047 }
2048
2049 ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
2050 clear_aptpl_metadata);
2051 if (ret != 0)
2052 return -1;
2053 /*
2054 * __core_scsi3_write_aptpl_to_file() will call strlen()
2055 * on the passed buf to determine pr_aptpl_buf_len.
2056 */
2057 ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
2058 if (ret != 0)
2059 return -1;
2060
2061 return ret;
2062}
2063
/*
 * Emulate the PROUT REGISTER and REGISTER_AND_IGNORE_EXISTING_KEY
 * service actions, following spc4r17 Section 5.7.7 (Register Behaviors,
 * Table 47).
 *
 * @res_key:    RESERVATION KEY from the PROUT parameter list
 * @sa_res_key: SERVICE ACTION RESERVATION KEY from the parameter list
 * @aptpl:      APTPL bit - persist registrations across power loss
 * @all_tg_pt:  ALL_TG_PT bit - registration spans all target ports
 * @spec_i_pt:  SPEC_I_PT bit - also register payload TransportIDs
 * @ignore_key: non-zero for REGISTER_AND_IGNORE_EXISTING_KEY
 *
 * Returns 0 or PYX_TRANSPORT_SENT_TO_TRANSPORT for GOOD status,
 * otherwise a PYX_TRANSPORT_* error code.
 */
static int core_scsi3_emulate_pro_register(
	struct se_cmd *cmd,
	u64 res_key,
	u64 sa_res_key,
	int aptpl,
	int all_tg_pt,
	int spec_i_pt,
	int ignore_key)
{
	struct se_session *se_sess = SE_SESS(cmd);
	struct se_device *dev = SE_DEV(cmd);
	struct se_dev_entry *se_deve;
	struct se_lun *se_lun = SE_LUN(cmd);
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
	/* Used for APTPL metadata w/ UNREGISTER */
	unsigned char *pr_aptpl_buf = NULL;
	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
	int pr_holder = 0, ret = 0, type;

	if (!(se_sess) || !(se_lun)) {
		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	se_tpg = se_sess->se_tpg;
	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];

	/*
	 * Capture the fabric-provided initiator session ID (ISID), when
	 * the fabric module implements the optional callback.
	 */
	if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
		memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
		TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
				PR_REG_ISID_LEN);
		isid_ptr = &isid_buf[0];
	}
	/*
	 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
	 */
	pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
	if (!(pr_reg_e)) {
		/*
		 * No existing registration for this I_T nexus: a non-zero
		 * RESERVATION KEY is a conflict per Table 47.
		 */
		if (res_key) {
			printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
				" for SA REGISTER, returning CONFLICT\n");
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		}
		/*
		 * Do nothing but return GOOD status.
		 */
		if (!(sa_res_key))
			return PYX_TRANSPORT_SENT_TO_TRANSPORT;

		if (!(spec_i_pt)) {
			/*
			 * Perform the Service Action REGISTER on the Initiator
			 * Port Endpoint that the PRO was received from on the
			 * Logical Unit of the SCSI device server.
			 */
			ret = core_scsi3_alloc_registration(SE_DEV(cmd),
					se_sess->se_node_acl, se_deve, isid_ptr,
					sa_res_key, all_tg_pt, aptpl,
					ignore_key, 0);
			if (ret != 0) {
				printk(KERN_ERR "Unable to allocate"
					" struct t10_pr_registration\n");
				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
			}
		} else {
			/*
			 * Register both the Initiator port that received
			 * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI
			 * TransportID from Parameter list and loop through
			 * fabric dependent parameter list while calling
			 * logic from of core_scsi3_alloc_registration() for
			 * each TransportID provided SCSI Initiator Port/Device
			 */
			ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
					isid_ptr, sa_res_key, all_tg_pt, aptpl);
			if (ret != 0)
				return ret;
		}
		/*
		 * Nothing left to do for the APTPL=0 case.
		 */
		if (!(aptpl)) {
			pr_tmpl->pr_aptpl_active = 0;
			core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
			printk("SPC-3 PR: Set APTPL Bit Deactivated for"
					" REGISTER\n");
			return 0;
		}
		/*
		 * Locate the newly allocated local I_T Nexus *pr_reg, and
		 * update the APTPL metadata information using its
		 * preallocated *pr_reg->pr_aptpl_buf.
		 *
		 * NOTE(review): core_scsi3_locate_pr_reg() can return NULL
		 * (see the !(pr_reg_e) check above); the dereference of
		 * pr_reg->pr_aptpl_buf below assumes the registration just
		 * created is always found -- confirm, or add a NULL check.
		 */
		pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
				se_sess->se_node_acl, se_sess);

		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
				&pr_reg->pr_aptpl_buf[0],
				pr_tmpl->pr_aptpl_buf_len);
		if (!(ret)) {
			pr_tmpl->pr_aptpl_active = 1;
			printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
		}

		core_scsi3_put_pr_reg(pr_reg);
		return ret;
	} else {
		/*
		 * Locate the existing *pr_reg via struct se_node_acl pointers
		 */
		pr_reg = pr_reg_e;
		type = pr_reg->pr_res_type;

		/*
		 * REGISTER (as opposed to REGISTER_AND_IGNORE_EXISTING_KEY)
		 * requires the RESERVATION KEY to match the registered key.
		 */
		if (!(ignore_key)) {
			if (res_key != pr_reg->pr_res_key) {
				printk(KERN_ERR "SPC-3 PR REGISTER: Received"
					" res_key: 0x%016Lx does not match"
					" existing SA REGISTER res_key:"
					" 0x%016Lx\n", res_key,
					pr_reg->pr_res_key);
				core_scsi3_put_pr_reg(pr_reg);
				return PYX_TRANSPORT_RESERVATION_CONFLICT;
			}
		}
		if (spec_i_pt) {
			printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
				" set while sa_res_key=0\n");
			core_scsi3_put_pr_reg(pr_reg);
			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
		}
		/*
		 * An existing ALL_TG_PT=1 registration being released
		 * must also set ALL_TG_PT=1 in the incoming PROUT.
		 */
		if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
			printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
				" registration exists, but ALL_TG_PT=1 bit not"
				" present in received PROUT\n");
			core_scsi3_put_pr_reg(pr_reg);
			return PYX_TRANSPORT_INVALID_CDB_FIELD;
		}
		/*
		 * Allocate APTPL metadata buffer used for UNREGISTER ops
		 */
		if (aptpl) {
			pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
						GFP_KERNEL);
			if (!(pr_aptpl_buf)) {
				printk(KERN_ERR "Unable to allocate"
					" pr_aptpl_buf\n");
				core_scsi3_put_pr_reg(pr_reg);
				return PYX_TRANSPORT_LU_COMM_FAILURE;
			}
		}
		/*
		 * sa_res_key == 0: Unregister the Reservation Key for this
		 * registered I_T Nexus.  sa_res_key != 0: Change the
		 * Reservation Key for this registered I_T Nexus.
		 */
		if (!(sa_res_key)) {
			pr_holder = core_scsi3_check_implict_release(
					SE_DEV(cmd), pr_reg);
			if (pr_holder < 0) {
				kfree(pr_aptpl_buf);
				core_scsi3_put_pr_reg(pr_reg);
				return PYX_TRANSPORT_RESERVATION_CONFLICT;
			}

			spin_lock(&pr_tmpl->registration_lock);
			/*
			 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
			 * and matching pr_res_key.
			 */
			if (pr_reg->pr_reg_all_tg_pt) {
				list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
						&pr_tmpl->registration_list,
						pr_reg_list) {

					if (!(pr_reg_p->pr_reg_all_tg_pt))
						continue;

					if (pr_reg_p->pr_res_key != res_key)
						continue;

					if (pr_reg == pr_reg_p)
						continue;

					if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
						   pr_reg_p->pr_reg_nacl->initiatorname))
						continue;

					__core_scsi3_free_registration(dev,
							pr_reg_p, NULL, 0);
				}
			}
			/*
			 * Release the calling I_T Nexus registration now..
			 */
			__core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
							NULL, 1);
			/*
			 * From spc4r17, section 5.7.11.3 Unregistering
			 *
			 * If the persistent reservation is a registrants only
			 * type, the device server shall establish a unit
			 * attention condition for the initiator port associated
			 * with every registered I_T nexus except for the I_T
			 * nexus on which the PERSISTENT RESERVE OUT command was
			 * received, with the additional sense code set to
			 * RESERVATIONS RELEASED.
			 */
			if (pr_holder &&
			    ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
			     (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
				list_for_each_entry(pr_reg_p,
						&pr_tmpl->registration_list,
						pr_reg_list) {

					core_scsi3_ua_allocate(
						pr_reg_p->pr_reg_nacl,
						pr_reg_p->pr_res_mapped_lun,
						0x2A,
						ASCQ_2AH_RESERVATIONS_RELEASED);
				}
			}
			spin_unlock(&pr_tmpl->registration_lock);

			if (!(aptpl)) {
				pr_tmpl->pr_aptpl_active = 0;
				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
				printk("SPC-3 PR: Set APTPL Bit Deactivated"
						" for UNREGISTER\n");
				return 0;
			}

			ret = core_scsi3_update_and_write_aptpl(dev,
					&pr_aptpl_buf[0],
					pr_tmpl->pr_aptpl_buf_len);
			if (!(ret)) {
				pr_tmpl->pr_aptpl_active = 1;
				printk("SPC-3 PR: Set APTPL Bit Activated"
						" for UNREGISTER\n");
			}

			kfree(pr_aptpl_buf);
			return ret;
		} else {
			/*
			 * Increment PRgeneration counter for struct se_device
			 * upon a successful REGISTER, see spc4r17 section 6.3.2
			 * READ_KEYS service action.
			 */
			pr_reg->pr_res_generation = core_scsi3_pr_generation(
							SE_DEV(cmd));
			pr_reg->pr_res_key = sa_res_key;
			printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
				" Key for %s to: 0x%016Lx PRgeneration:"
				" 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
				(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
				pr_reg->pr_reg_nacl->initiatorname,
				pr_reg->pr_res_key, pr_reg->pr_res_generation);

			if (!(aptpl)) {
				pr_tmpl->pr_aptpl_active = 0;
				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
				core_scsi3_put_pr_reg(pr_reg);
				printk("SPC-3 PR: Set APTPL Bit Deactivated"
						" for REGISTER\n");
				return 0;
			}

			ret = core_scsi3_update_and_write_aptpl(dev,
					&pr_aptpl_buf[0],
					pr_tmpl->pr_aptpl_buf_len);
			if (!(ret)) {
				pr_tmpl->pr_aptpl_active = 1;
				printk("SPC-3 PR: Set APTPL Bit Activated"
						" for REGISTER\n");
			}

			kfree(pr_aptpl_buf);
			core_scsi3_put_pr_reg(pr_reg);
		}
	}
	return 0;
}
2351
2352unsigned char *core_scsi3_pr_dump_type(int type)
2353{
2354 switch (type) {
2355 case PR_TYPE_WRITE_EXCLUSIVE:
2356 return "Write Exclusive Access";
2357 case PR_TYPE_EXCLUSIVE_ACCESS:
2358 return "Exclusive Access";
2359 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
2360 return "Write Exclusive Access, Registrants Only";
2361 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
2362 return "Exclusive Access, Registrants Only";
2363 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
2364 return "Write Exclusive Access, All Registrants";
2365 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
2366 return "Exclusive Access, All Registrants";
2367 default:
2368 break;
2369 }
2370
2371 return "Unknown SPC-3 PR Type";
2372}
2373
/*
 * Emulate the PROUT RESERVE service action (spc4r17 Section 5.7.9) for
 * an already-validated TYPE code: verify the caller's registration and
 * key, enforce LU_SCOPE, handle conflicts with an existing reservation
 * holder, and otherwise establish *pr_reg as the new holder.
 *
 * Returns 0 or PYX_TRANSPORT_SENT_TO_TRANSPORT for GOOD status,
 * otherwise a PYX_TRANSPORT_* error code.
 */
static int core_scsi3_pro_reserve(
	struct se_cmd *cmd,
	struct se_device *dev,
	int type,
	int scope,
	u64 res_key)
{
	struct se_session *se_sess = SE_SESS(cmd);
	struct se_dev_entry *se_deve;
	struct se_lun *se_lun = SE_LUN(cmd);
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg, *pr_res_holder;
	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
	char i_buf[PR_REG_ISID_ID_LEN];	/* dumped initiator port for logging */
	int ret, prf_isid;

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	if (!(se_sess) || !(se_lun)) {
		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	se_tpg = se_sess->se_tpg;
	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
	/*
	 * Locate the existing *pr_reg via struct se_node_acl pointers
	 */
	pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
				se_sess);
	if (!(pr_reg)) {
		printk(KERN_ERR "SPC-3 PR: Unable to locate"
			" PR_REGISTERED *pr_reg for RESERVE\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	/*
	 * From spc4r17 Section 5.7.9: Reserving:
	 *
	 * An application client creates a persistent reservation by issuing
	 * a PERSISTENT RESERVE OUT command with RESERVE service action through
	 * a registered I_T nexus with the following parameters:
	 * a) RESERVATION KEY set to the value of the reservation key that is
	 *    registered with the logical unit for the I_T nexus; and
	 */
	if (res_key != pr_reg->pr_res_key) {
		printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
			" does not match existing SA REGISTER res_key:"
			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
		core_scsi3_put_pr_reg(pr_reg);
		return PYX_TRANSPORT_RESERVATION_CONFLICT;
	}
	/*
	 * From spc4r17 Section 5.7.9: Reserving:
	 *
	 * From above:
	 * b) TYPE field and SCOPE field set to the persistent reservation
	 *    being created.
	 *
	 * Only one persistent reservation is allowed at a time per logical unit
	 * and that persistent reservation has a scope of LU_SCOPE.
	 */
	if (scope != PR_SCOPE_LU_SCOPE) {
		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
		core_scsi3_put_pr_reg(pr_reg);
		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
	}
	/*
	 * See if we have an existing PR reservation holder pointer at
	 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
	 * *pr_res_holder.
	 */
	spin_lock(&dev->dev_reservation_lock);
	pr_res_holder = dev->dev_pr_res_holder;
	if ((pr_res_holder)) {
		/*
		 * From spc4r17 Section 5.7.9: Reserving:
		 *
		 * If the device server receives a PERSISTENT RESERVE OUT
		 * command from an I_T nexus other than a persistent reservation
		 * holder (see 5.7.10) that attempts to create a persistent
		 * reservation when a persistent reservation already exists for
		 * the logical unit, then the command shall be completed with
		 * RESERVATION CONFLICT status.
		 */
		if (pr_res_holder != pr_reg) {
			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
				" [%s]: %s while reservation already held by"
				" [%s]: %s, returning RESERVATION_CONFLICT\n",
				CMD_TFO(cmd)->get_fabric_name(),
				se_sess->se_node_acl->initiatorname,
				TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
				pr_res_holder->pr_reg_nacl->initiatorname);

			spin_unlock(&dev->dev_reservation_lock);
			core_scsi3_put_pr_reg(pr_reg);
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		}
		/*
		 * From spc4r17 Section 5.7.9: Reserving:
		 *
		 * If a persistent reservation holder attempts to modify the
		 * type or scope of an existing persistent reservation, the
		 * command shall be completed with RESERVATION CONFLICT status.
		 */
		if ((pr_res_holder->pr_res_type != type) ||
		    (pr_res_holder->pr_res_scope != scope)) {
			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
				" [%s]: %s trying to change TYPE and/or SCOPE,"
				" while reservation already held by [%s]: %s,"
				" returning RESERVATION_CONFLICT\n",
				CMD_TFO(cmd)->get_fabric_name(),
				se_sess->se_node_acl->initiatorname,
				TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
				pr_res_holder->pr_reg_nacl->initiatorname);

			spin_unlock(&dev->dev_reservation_lock);
			core_scsi3_put_pr_reg(pr_reg);
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		}
		/*
		 * From spc4r17 Section 5.7.9: Reserving:
		 *
		 * If the device server receives a PERSISTENT RESERVE OUT
		 * command with RESERVE service action where the TYPE field and
		 * the SCOPE field contain the same values as the existing type
		 * and scope from a persistent reservation holder, it shall not
		 * make any change to the existing persistent reservation and
		 * shall completethe command with GOOD status.
		 */
		spin_unlock(&dev->dev_reservation_lock);
		core_scsi3_put_pr_reg(pr_reg);
		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
	}
	/*
	 * Otherwise, our *pr_reg becomes the PR reservation holder for said
	 * TYPE/SCOPE.  Also set the received scope and type in *pr_reg.
	 */
	pr_reg->pr_res_scope = scope;
	pr_reg->pr_res_type = type;
	pr_reg->pr_res_holder = 1;
	dev->dev_pr_res_holder = pr_reg;
	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
				PR_REG_ISID_ID_LEN);

	printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
		CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
			CMD_TFO(cmd)->get_fabric_name(),
			se_sess->se_node_acl->initiatorname,
			(prf_isid) ? &i_buf[0] : "");
	spin_unlock(&dev->dev_reservation_lock);

	/*
	 * Persist the new reservation holder state when APTPL is active.
	 */
	if (pr_tmpl->pr_aptpl_active) {
		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
				&pr_reg->pr_aptpl_buf[0],
				pr_tmpl->pr_aptpl_buf_len);
		if (!(ret))
			printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
					" for RESERVE\n");
	}

	core_scsi3_put_pr_reg(pr_reg);
	return 0;
}
2541
2542static int core_scsi3_emulate_pro_reserve(
2543 struct se_cmd *cmd,
2544 int type,
2545 int scope,
2546 u64 res_key)
2547{
2548 struct se_device *dev = cmd->se_dev;
2549 int ret = 0;
2550
2551 switch (type) {
2552 case PR_TYPE_WRITE_EXCLUSIVE:
2553 case PR_TYPE_EXCLUSIVE_ACCESS:
2554 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
2555 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
2556 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
2557 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
2558 ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
2559 break;
2560 default:
2561 printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
2562 " 0x%02x\n", type);
2563 return PYX_TRANSPORT_INVALID_CDB_FIELD;
2564 }
2565
2566 return ret;
2567}
2568
2569/*
2570 * Called with struct se_device->dev_reservation_lock held.
2571 */
2572static void __core_scsi3_complete_pro_release(
2573 struct se_device *dev,
2574 struct se_node_acl *se_nacl,
2575 struct t10_pr_registration *pr_reg,
2576 int explict)
2577{
2578 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
2579 char i_buf[PR_REG_ISID_ID_LEN];
2580 int prf_isid;
2581
2582 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2583 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2584 PR_REG_ISID_ID_LEN);
2585 /*
2586 * Go ahead and release the current PR reservation holder.
2587 */
2588 dev->dev_pr_res_holder = NULL;
2589
2590 printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2591 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2592 tfo->get_fabric_name(), (explict) ? "explict" : "implict",
2593 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
2594 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2595 printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
2596 tfo->get_fabric_name(), se_nacl->initiatorname,
2597 (prf_isid) ? &i_buf[0] : "");
2598 /*
2599 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
2600 */
2601 pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
2602}
2603
2604static int core_scsi3_emulate_pro_release(
2605 struct se_cmd *cmd,
2606 int type,
2607 int scope,
2608 u64 res_key)
2609{
2610 struct se_device *dev = cmd->se_dev;
2611 struct se_session *se_sess = SE_SESS(cmd);
2612 struct se_lun *se_lun = SE_LUN(cmd);
2613 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
2614 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
2615 int ret, all_reg = 0;
2616
2617 if (!(se_sess) || !(se_lun)) {
2618 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2619 return PYX_TRANSPORT_LU_COMM_FAILURE;
2620 }
2621 /*
2622 * Locate the existing *pr_reg via struct se_node_acl pointers
2623 */
2624 pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
2625 if (!(pr_reg)) {
2626 printk(KERN_ERR "SPC-3 PR: Unable to locate"
2627 " PR_REGISTERED *pr_reg for RELEASE\n");
2628 return PYX_TRANSPORT_LU_COMM_FAILURE;
2629 }
2630 /*
2631 * From spc4r17 Section 5.7.11.2 Releasing:
2632 *
2633 * If there is no persistent reservation or in response to a persistent
2634 * reservation release request from a registered I_T nexus that is not a
2635 * persistent reservation holder (see 5.7.10), the device server shall
2636 * do the following:
2637 *
2638 * a) Not release the persistent reservation, if any;
2639 * b) Not remove any registrations; and
2640 * c) Complete the command with GOOD status.
2641 */
2642 spin_lock(&dev->dev_reservation_lock);
2643 pr_res_holder = dev->dev_pr_res_holder;
2644 if (!(pr_res_holder)) {
2645 /*
2646 * No persistent reservation, return GOOD status.
2647 */
2648 spin_unlock(&dev->dev_reservation_lock);
2649 core_scsi3_put_pr_reg(pr_reg);
2650 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
2651 }
2652 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2653 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
2654 all_reg = 1;
2655
2656 if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
2657 /*
2658 * Non 'All Registrants' PR Type cases..
2659 * Release request from a registered I_T nexus that is not a
2660 * persistent reservation holder. return GOOD status.
2661 */
2662 spin_unlock(&dev->dev_reservation_lock);
2663 core_scsi3_put_pr_reg(pr_reg);
2664 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
2665 }
2666 /*
2667 * From spc4r17 Section 5.7.11.2 Releasing:
2668 *
2669 * Only the persistent reservation holder (see 5.7.10) is allowed to
2670 * release a persistent reservation.
2671 *
2672 * An application client releases the persistent reservation by issuing
2673 * a PERSISTENT RESERVE OUT command with RELEASE service action through
2674 * an I_T nexus that is a persistent reservation holder with the
2675 * following parameters:
2676 *
2677 * a) RESERVATION KEY field set to the value of the reservation key
2678 * that is registered with the logical unit for the I_T nexus;
2679 */
2680 if (res_key != pr_reg->pr_res_key) {
2681 printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
2682 " does not match existing SA REGISTER res_key:"
2683 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2684 spin_unlock(&dev->dev_reservation_lock);
2685 core_scsi3_put_pr_reg(pr_reg);
2686 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2687 }
2688 /*
2689 * From spc4r17 Section 5.7.11.2 Releasing and above:
2690 *
2691 * b) TYPE field and SCOPE field set to match the persistent
2692 * reservation being released.
2693 */
2694 if ((pr_res_holder->pr_res_type != type) ||
2695 (pr_res_holder->pr_res_scope != scope)) {
2696 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2697 printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
2698 " reservation from [%s]: %s with different TYPE "
2699 "and/or SCOPE while reservation already held by"
2700 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2701 CMD_TFO(cmd)->get_fabric_name(),
2702 se_sess->se_node_acl->initiatorname,
2703 TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
2704 pr_res_holder->pr_reg_nacl->initiatorname);
2705
2706 spin_unlock(&dev->dev_reservation_lock);
2707 core_scsi3_put_pr_reg(pr_reg);
2708 return PYX_TRANSPORT_RESERVATION_CONFLICT;
2709 }
2710 /*
2711 * In response to a persistent reservation release request from the
2712 * persistent reservation holder the device server shall perform a
2713 * release by doing the following as an uninterrupted series of actions:
2714 * a) Release the persistent reservation;
2715 * b) Not remove any registration(s);
2716 * c) If the released persistent reservation is a registrants only type
2717 * or all registrants type persistent reservation,
2718 * the device server shall establish a unit attention condition for
2719 * the initiator port associated with every regis-
2720 * tered I_T nexus other than I_T nexus on which the PERSISTENT
2721 * RESERVE OUT command with RELEASE service action was received,
2722 * with the additional sense code set to RESERVATIONS RELEASED; and
2723 * d) If the persistent reservation is of any other type, the device
2724 * server shall not establish a unit attention condition.
2725 */
2726 __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
2727 pr_reg, 1);
2728
2729 spin_unlock(&dev->dev_reservation_lock);
2730
2731 if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
2732 (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
2733 (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
2734 (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
2735 /*
2736 * If no UNIT ATTENTION conditions will be established for
2737 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS
2738 * go ahead and check for APTPL=1 update+write below
2739 */
2740 goto write_aptpl;
2741 }
2742
2743 spin_lock(&pr_tmpl->registration_lock);
2744 list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
2745 pr_reg_list) {
2746 /*
2747 * Do not establish a UNIT ATTENTION condition
2748 * for the calling I_T Nexus
2749 */
2750 if (pr_reg_p == pr_reg)
2751 continue;
2752
2753 core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
2754 pr_reg_p->pr_res_mapped_lun,
2755 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
2756 }
2757 spin_unlock(&pr_tmpl->registration_lock);
2758
2759write_aptpl:
2760 if (pr_tmpl->pr_aptpl_active) {
2761 ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
2762 &pr_reg->pr_aptpl_buf[0],
2763 pr_tmpl->pr_aptpl_buf_len);
2764 if (!(ret))
2765 printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
2766 }
2767
2768 core_scsi3_put_pr_reg(pr_reg);
2769 return 0;
2770}
2771
/*
 * Emulate the PROUT CLEAR service action per spc4r17 section 5.7.11.6.
 *
 * After validating that the provided reservation key matches the calling
 * I_T nexus' registration, this releases any held persistent reservation,
 * removes every registration on the device, and establishes a
 * RESERVATIONS PREEMPTED unit attention for every other registered
 * I_T nexus.  Returns 0 on success or a PYX_TRANSPORT_* error code.
 */
static int core_scsi3_emulate_pro_clear(
	struct se_cmd *cmd,
	u64 res_key)
{
	struct se_device *dev = cmd->se_dev;
	struct se_node_acl *pr_reg_nacl;
	struct se_session *se_sess = SE_SESS(cmd);
	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
	u32 pr_res_mapped_lun = 0;
	int calling_it_nexus = 0;
	/*
	 * Locate the existing *pr_reg via struct se_node_acl pointers
	 */
	pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
			se_sess->se_node_acl, se_sess);
	if (!(pr_reg_n)) {
		printk(KERN_ERR "SPC-3 PR: Unable to locate"
			" PR_REGISTERED *pr_reg for CLEAR\n");
			return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	/*
	 * From spc4r17 section 5.7.11.6, Clearing:
	 *
	 * Any application client may release the persistent reservation and
	 * remove all registrations from a device server by issuing a
	 * PERSISTENT RESERVE OUT command with CLEAR service action through a
	 * registered I_T nexus with the following parameter:
	 *
	 * a) RESERVATION KEY field set to the value of the reservation key
	 * 	that is registered with the logical unit for the I_T nexus.
	 */
	if (res_key != pr_reg_n->pr_res_key) {
		printk(KERN_ERR "SPC-3 PR REGISTER: Received"
			" res_key: 0x%016Lx does not match"
			" existing SA REGISTER res_key:"
			" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
		core_scsi3_put_pr_reg(pr_reg_n);
		return PYX_TRANSPORT_RESERVATION_CONFLICT;
	}
	/*
	 * a) Release the persistent reservation, if any;
	 */
	spin_lock(&dev->dev_reservation_lock);
	pr_res_holder = dev->dev_pr_res_holder;
	if (pr_res_holder) {
		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
		/* Implicit (explict=0) release of the held reservation */
		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
			pr_res_holder, 0);
	}
	spin_unlock(&dev->dev_reservation_lock);
	/*
	 * b) Remove all registration(s) (see spc4r17 5.7.7);
	 */
	spin_lock(&pr_tmpl->registration_lock);
	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
			&pr_tmpl->registration_list, pr_reg_list) {

		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
		/* Cache nacl/lun before the registration is freed below */
		pr_reg_nacl = pr_reg->pr_reg_nacl;
		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
		__core_scsi3_free_registration(dev, pr_reg, NULL,
					calling_it_nexus);
		/*
		 * e) Establish a unit attention condition for the initiator
		 *    port associated with every registered I_T nexus other
		 *    than the I_T nexus on which the PERSISTENT RESERVE OUT
		 *    command with CLEAR service action was received, with the
		 *    additional sense code set to RESERVATIONS PREEMPTED.
		 */
		if (!(calling_it_nexus))
			core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
				0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
	}
	spin_unlock(&pr_tmpl->registration_lock);

	printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
		CMD_TFO(cmd)->get_fabric_name());

	/* CLEAR removes all registrations, so write out an empty APTPL file */
	if (pr_tmpl->pr_aptpl_active) {
		core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
		printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
			" for CLEAR\n");
	}

	core_scsi3_pr_generation(dev);
	return 0;
}
2860
2861/*
2862 * Called with struct se_device->dev_reservation_lock held.
2863 */
2864static void __core_scsi3_complete_pro_preempt(
2865 struct se_device *dev,
2866 struct t10_pr_registration *pr_reg,
2867 struct list_head *preempt_and_abort_list,
2868 int type,
2869 int scope,
2870 int abort)
2871{
2872 struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
2873 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
2874 char i_buf[PR_REG_ISID_ID_LEN];
2875 int prf_isid;
2876
2877 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2878 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2879 PR_REG_ISID_ID_LEN);
2880 /*
2881 * Do an implict RELEASE of the existing reservation.
2882 */
2883 if (dev->dev_pr_res_holder)
2884 __core_scsi3_complete_pro_release(dev, nacl,
2885 dev->dev_pr_res_holder, 0);
2886
2887 dev->dev_pr_res_holder = pr_reg;
2888 pr_reg->pr_res_holder = 1;
2889 pr_reg->pr_res_type = type;
2890 pr_reg->pr_res_scope = scope;
2891
2892 printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
2893 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2894 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
2895 core_scsi3_pr_dump_type(type),
2896 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2897 printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
2898 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
2899 nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
2900 /*
2901 * For PREEMPT_AND_ABORT, add the preempting reservation's
2902 * struct t10_pr_registration to the list that will be compared
2903 * against received CDBs..
2904 */
2905 if (preempt_and_abort_list)
2906 list_add_tail(&pr_reg->pr_reg_abort_list,
2907 preempt_and_abort_list);
2908}
2909
2910static void core_scsi3_release_preempt_and_abort(
2911 struct list_head *preempt_and_abort_list,
2912 struct t10_pr_registration *pr_reg_holder)
2913{
2914 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
2915
2916 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
2917 pr_reg_abort_list) {
2918
2919 list_del(&pr_reg->pr_reg_abort_list);
2920 if (pr_reg_holder == pr_reg)
2921 continue;
2922 if (pr_reg->pr_res_holder) {
2923 printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
2924 continue;
2925 }
2926
2927 pr_reg->pr_reg_deve = NULL;
2928 pr_reg->pr_reg_nacl = NULL;
2929 kfree(pr_reg->pr_aptpl_buf);
2930 kmem_cache_free(t10_pr_reg_cache, pr_reg);
2931 }
2932}
2933
2934int core_scsi3_check_cdb_abort_and_preempt(
2935 struct list_head *preempt_and_abort_list,
2936 struct se_cmd *cmd)
2937{
2938 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
2939
2940 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
2941 pr_reg_abort_list) {
2942 if (pr_reg->pr_res_key == cmd->pr_res_key)
2943 return 0;
2944 }
2945
2946 return 1;
2947}
2948
/*
 * Core logic for the PROUT PREEMPT and PREEMPT_AND_ABORT service actions
 * (spc4r17 section 5.7.11.4).
 *
 * Two major paths exist, split on whether the SERVICE ACTION RESERVATION
 * KEY identifies the current reservation holder:
 *  - holder NOT identified: remove matching registrations (and, for an
 *    all-registrants reservation with a zero SA key, take over the
 *    reservation), returning RESERVATION CONFLICT if nothing matched;
 *  - holder identified: release the holder's reservation, remove the
 *    matching registrations, and establish a new reservation for the
 *    preempting I_T nexus.
 * For PREEMPT_AND_ABORT (abort != 0) the freed registrations are
 * collected on a local list and fed to core_tmr_lun_reset() so their
 * outstanding commands undergo ABORT_TASK handling.
 *
 * Returns 0 on success or a PYX_TRANSPORT_* error code.
 */
static int core_scsi3_pro_preempt(
	struct se_cmd *cmd,
	int type,
	int scope,
	u64 res_key,
	u64 sa_res_key,
	int abort)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_dev_entry *se_deve;
	struct se_node_acl *pr_reg_nacl;
	struct se_session *se_sess = SE_SESS(cmd);
	struct list_head preempt_and_abort_list;
	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
	u32 pr_res_mapped_lun = 0;
	int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
	int prh_type = 0, prh_scope = 0, ret;

	if (!(se_sess))
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	/* NOTE(review): se_deve is assigned but never read below — looks vestigial */
	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
	pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
				se_sess);
	if (!(pr_reg_n)) {
		printk(KERN_ERR "SPC-3 PR: Unable to locate"
			" PR_REGISTERED *pr_reg for PREEMPT%s\n",
			(abort) ? "_AND_ABORT" : "");
		return PYX_TRANSPORT_RESERVATION_CONFLICT;
	}
	if (pr_reg_n->pr_res_key != res_key) {
		core_scsi3_put_pr_reg(pr_reg_n);
		return PYX_TRANSPORT_RESERVATION_CONFLICT;
	}
	if (scope != PR_SCOPE_LU_SCOPE) {
		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
		core_scsi3_put_pr_reg(pr_reg_n);
		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
	}
	INIT_LIST_HEAD(&preempt_and_abort_list);

	spin_lock(&dev->dev_reservation_lock);
	pr_res_holder = dev->dev_pr_res_holder;
	if (pr_res_holder &&
	   ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
		all_reg = 1;

	if (!(all_reg) && !(sa_res_key)) {
		/* A zero SA key is only meaningful for all-registrants types */
		spin_unlock(&dev->dev_reservation_lock);
		core_scsi3_put_pr_reg(pr_reg_n);
		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
	}
	/*
	 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
	 *
	 * If the SERVICE ACTION RESERVATION KEY field does not identify a
	 * persistent reservation holder or there is no persistent reservation
	 * holder (i.e., there is no persistent reservation), then the device
	 * server shall perform a preempt by doing the following in an
	 * uninterrupted series of actions. (See below..)
	 */
	if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
		/*
		 * No existing or SA Reservation Key matching reservations..
		 *
		 * PROUT SA PREEMPT with All Registrant type reservations are
		 * allowed to be processed without a matching SA Reservation Key
		 */
		/* registration_lock is taken nested inside dev_reservation_lock */
		spin_lock(&pr_tmpl->registration_lock);
		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
				&pr_tmpl->registration_list, pr_reg_list) {
			/*
			 * Removing of registrations in non all registrants
			 * type reservations without a matching SA reservation
			 * key.
			 *
			 * a) Remove the registrations for all I_T nexuses
			 *    specified by the SERVICE ACTION RESERVATION KEY
			 *    field;
			 * b) Ignore the contents of the SCOPE and TYPE fields;
			 * c) Process tasks as defined in 5.7.1; and
			 * d) Establish a unit attention condition for the
			 *    initiator port associated with every I_T nexus
			 *    that lost its registration other than the I_T
			 *    nexus on which the PERSISTENT RESERVE OUT command
			 *    was received, with the additional sense code set
			 *    to REGISTRATIONS PREEMPTED.
			 */
			if (!(all_reg)) {
				if (pr_reg->pr_res_key != sa_res_key)
					continue;

				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
				/* Cache before the registration is freed */
				pr_reg_nacl = pr_reg->pr_reg_nacl;
				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
				__core_scsi3_free_registration(dev, pr_reg,
					(abort) ? &preempt_and_abort_list :
						NULL, calling_it_nexus);
				released_regs++;
			} else {
				/*
				 * Case for any existing all registrants type
				 * reservation, follow logic in spc4r17 section
				 * 5.7.11.4 Preempting, Table 52 and Figure 7.
				 *
				 * For a ZERO SA Reservation key, release
				 * all other registrations and do an implict
				 * release of active persistent reservation.
				 *
				 * For a non-ZERO SA Reservation key, only
				 * release the matching reservation key from
				 * registrations.
				 */
				if ((sa_res_key) &&
				     (pr_reg->pr_res_key != sa_res_key))
					continue;

				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
				if (calling_it_nexus)
					continue;

				pr_reg_nacl = pr_reg->pr_reg_nacl;
				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
				__core_scsi3_free_registration(dev, pr_reg,
					(abort) ? &preempt_and_abort_list :
						NULL, 0);
				released_regs++;
			}
			/*
			 * NOTE(review): calling_it_nexus carries over from the
			 * previous iteration when a 'continue' above was taken
			 * before it was reassigned — presumably benign, but
			 * worth confirming against upstream history.
			 */
			if (!(calling_it_nexus))
				core_scsi3_ua_allocate(pr_reg_nacl,
					pr_res_mapped_lun, 0x2A,
					ASCQ_2AH_RESERVATIONS_PREEMPTED);
		}
		spin_unlock(&pr_tmpl->registration_lock);
		/*
		 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
		 * a PREEMPT AND ABORT service action sets the SERVICE ACTION
		 * RESERVATION KEY field to a value that does not match any
		 * registered reservation key, then the device server shall
		 * complete the command with RESERVATION CONFLICT status.
		 */
		if (!(released_regs)) {
			spin_unlock(&dev->dev_reservation_lock);
			core_scsi3_put_pr_reg(pr_reg_n);
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		}
		/*
		 * For an existing all registrants type reservation
		 * with a zero SA rservation key, preempt the existing
		 * reservation with the new PR type and scope.
		 */
		if (pr_res_holder && all_reg && !(sa_res_key)) {
			__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
				(abort) ? &preempt_and_abort_list : NULL,
				type, scope, abort);

			if (abort)
				core_scsi3_release_preempt_and_abort(
					&preempt_and_abort_list, pr_reg_n);
		}
		spin_unlock(&dev->dev_reservation_lock);

		/* Persist the updated registration state when APTPL is active */
		if (pr_tmpl->pr_aptpl_active) {
			ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
					&pr_reg_n->pr_aptpl_buf[0],
					pr_tmpl->pr_aptpl_buf_len);
			if (!(ret))
				printk(KERN_INFO "SPC-3 PR: Updated APTPL"
					" metadata for  PREEMPT%s\n", (abort) ?
					"_AND_ABORT" : "");
		}

		core_scsi3_put_pr_reg(pr_reg_n);
		core_scsi3_pr_generation(SE_DEV(cmd));
		return 0;
	}
	/*
	 * The PREEMPTing SA reservation key matches that of the
	 * existing persistent reservation, first, we check if
	 * we are preempting our own reservation.
	 * From spc4r17, section 5.7.11.4.3 Preempting
	 * persistent reservations and registration handling
	 *
	 * If an all registrants persistent reservation is not
	 * present, it is not an error for the persistent
	 * reservation holder to preempt itself (i.e., a
	 * PERSISTENT RESERVE OUT with a PREEMPT service action
	 * or a PREEMPT AND ABORT service action with the
	 * SERVICE ACTION RESERVATION KEY value equal to the
	 * persistent reservation holder's reservation key that
	 * is received from the persistent reservation holder).
	 * In that case, the device server shall establish the
	 * new persistent reservation and maintain the
	 * registration.
	 */
	/* Capture holder's TYPE/SCOPE before the release clears them */
	prh_type = pr_res_holder->pr_res_type;
	prh_scope = pr_res_holder->pr_res_scope;
	/*
	 * If the SERVICE ACTION RESERVATION KEY field identifies a
	 * persistent reservation holder (see 5.7.10), the device
	 * server shall perform a preempt by doing the following as
	 * an uninterrupted series of actions:
	 *
	 * a) Release the persistent reservation for the holder
	 *    identified by the SERVICE ACTION RESERVATION KEY field;
	 */
	if (pr_reg_n != pr_res_holder)
		__core_scsi3_complete_pro_release(dev,
				pr_res_holder->pr_reg_nacl,
				dev->dev_pr_res_holder, 0);
	/*
	 * b) Remove the registrations for all I_T nexuses identified
	 *    by the SERVICE ACTION RESERVATION KEY field, except the
	 *    I_T nexus that is being used for the PERSISTENT RESERVE
	 *    OUT command. If an all registrants persistent reservation
	 *    is present and the SERVICE ACTION RESERVATION KEY field
	 *    is set to zero, then all registrations shall be removed
	 *    except for that of the I_T nexus that is being used for
	 *    the PERSISTENT RESERVE OUT command;
	 */
	spin_lock(&pr_tmpl->registration_lock);
	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
			&pr_tmpl->registration_list, pr_reg_list) {

		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
		if (calling_it_nexus)
			continue;

		if (pr_reg->pr_res_key != sa_res_key)
			continue;

		pr_reg_nacl = pr_reg->pr_reg_nacl;
		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
		__core_scsi3_free_registration(dev, pr_reg,
				(abort) ? &preempt_and_abort_list : NULL,
				calling_it_nexus);
		/*
		 * e) Establish a unit attention condition for the initiator
		 *    port associated with every I_T nexus that lost its
		 *    persistent reservation and/or registration, with the
		 *    additional sense code set to REGISTRATIONS PREEMPTED;
		 */
		core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
				ASCQ_2AH_RESERVATIONS_PREEMPTED);
	}
	spin_unlock(&pr_tmpl->registration_lock);
	/*
	 * c) Establish a persistent reservation for the preempting
	 *    I_T nexus using the contents of the SCOPE and TYPE fields;
	 */
	__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
			(abort) ? &preempt_and_abort_list : NULL,
			type, scope, abort);
	/*
	 * d) Process tasks as defined in 5.7.1;
	 * e) See above..
	 * f) If the type or scope has changed, then for every I_T nexus
	 *    whose reservation key was not removed, except for the I_T
	 *    nexus on which the PERSISTENT RESERVE OUT command was
	 *    received, the device server shall establish a unit
	 *    attention condition for the initiator port associated with
	 *    that I_T nexus, with the additional sense code set to
	 *    RESERVATIONS RELEASED. If the type or scope have not
	 *    changed, then no unit attention condition(s) shall be
	 *    established for this reason.
	 */
	if ((prh_type != type) || (prh_scope != scope)) {
		spin_lock(&pr_tmpl->registration_lock);
		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
				&pr_tmpl->registration_list, pr_reg_list) {

			calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
			if (calling_it_nexus)
				continue;

			core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
					pr_reg->pr_res_mapped_lun, 0x2A,
					ASCQ_2AH_RESERVATIONS_RELEASED);
		}
		spin_unlock(&pr_tmpl->registration_lock);
	}
	spin_unlock(&dev->dev_reservation_lock);
	/*
	 * Call LUN_RESET logic upon list of struct t10_pr_registration,
	 * All received CDBs for the matching existing reservation and
	 * registrations undergo ABORT_TASK logic.
	 *
	 * From there, core_scsi3_release_preempt_and_abort() will
	 * release every registration in the list (which have already
	 * been removed from the primary pr_reg list), except the
	 * new persistent reservation holder, the calling Initiator Port.
	 */
	if (abort) {
		core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
		core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
						pr_reg_n);
	}

	if (pr_tmpl->pr_aptpl_active) {
		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
				&pr_reg_n->pr_aptpl_buf[0],
				pr_tmpl->pr_aptpl_buf_len);
		if (!(ret))
			printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
				"%s\n", (abort) ? "_AND_ABORT" : "");
	}

	core_scsi3_put_pr_reg(pr_reg_n);
	core_scsi3_pr_generation(SE_DEV(cmd));
	return 0;
}
3262
3263static int core_scsi3_emulate_pro_preempt(
3264 struct se_cmd *cmd,
3265 int type,
3266 int scope,
3267 u64 res_key,
3268 u64 sa_res_key,
3269 int abort)
3270{
3271 int ret = 0;
3272
3273 switch (type) {
3274 case PR_TYPE_WRITE_EXCLUSIVE:
3275 case PR_TYPE_EXCLUSIVE_ACCESS:
3276 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
3277 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
3278 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
3279 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
3280 ret = core_scsi3_pro_preempt(cmd, type, scope,
3281 res_key, sa_res_key, abort);
3282 break;
3283 default:
3284 printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
3285 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
3286 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3287 }
3288
3289 return ret;
3290}
3291
3292
3293static int core_scsi3_emulate_pro_register_and_move(
3294 struct se_cmd *cmd,
3295 u64 res_key,
3296 u64 sa_res_key,
3297 int aptpl,
3298 int unreg)
3299{
3300 struct se_session *se_sess = SE_SESS(cmd);
3301 struct se_device *dev = SE_DEV(cmd);
3302 struct se_dev_entry *se_deve, *dest_se_deve = NULL;
3303 struct se_lun *se_lun = SE_LUN(cmd);
3304 struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
3305 struct se_port *se_port;
3306 struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
3307 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
3308 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
3309 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
3310 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3311 unsigned char *initiator_str;
3312 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
3313 u32 tid_len, tmp_tid_len;
3314 int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
3315 unsigned short rtpi;
3316 unsigned char proto_ident;
3317
3318 if (!(se_sess) || !(se_lun)) {
3319 printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
3320 return PYX_TRANSPORT_LU_COMM_FAILURE;
3321 }
3322 memset(dest_iport, 0, 64);
3323 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
3324 se_tpg = se_sess->se_tpg;
3325 tf_ops = TPG_TFO(se_tpg);
3326 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
3327 /*
3328 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
3329 * Register behaviors for a REGISTER AND MOVE service action
3330 *
3331 * Locate the existing *pr_reg via struct se_node_acl pointers
3332 */
3333 pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
3334 se_sess);
3335 if (!(pr_reg)) {
3336 printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
3337 " *pr_reg for REGISTER_AND_MOVE\n");
3338 return PYX_TRANSPORT_LU_COMM_FAILURE;
3339 }
3340 /*
3341 * The provided reservation key much match the existing reservation key
3342 * provided during this initiator's I_T nexus registration.
3343 */
3344 if (res_key != pr_reg->pr_res_key) {
3345 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
3346 " res_key: 0x%016Lx does not match existing SA REGISTER"
3347 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
3348 core_scsi3_put_pr_reg(pr_reg);
3349 return PYX_TRANSPORT_RESERVATION_CONFLICT;
3350 }
3351 /*
3352 * The service active reservation key needs to be non zero
3353 */
3354 if (!(sa_res_key)) {
3355 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
3356 " sa_res_key\n");
3357 core_scsi3_put_pr_reg(pr_reg);
3358 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3359 }
3360 /*
3361 * Determine the Relative Target Port Identifier where the reservation
3362 * will be moved to for the TransportID containing SCSI initiator WWN
3363 * information.
3364 */
3365 rtpi = (buf[18] & 0xff) << 8;
3366 rtpi |= buf[19] & 0xff;
3367 tid_len = (buf[20] & 0xff) << 24;
3368 tid_len |= (buf[21] & 0xff) << 16;
3369 tid_len |= (buf[22] & 0xff) << 8;
3370 tid_len |= buf[23] & 0xff;
3371
3372 if ((tid_len + 24) != cmd->data_length) {
3373 printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
3374 " does not equal CDB data_length: %u\n", tid_len,
3375 cmd->data_length);
3376 core_scsi3_put_pr_reg(pr_reg);
3377 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3378 }
3379
3380 spin_lock(&dev->se_port_lock);
3381 list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
3382 if (se_port->sep_rtpi != rtpi)
3383 continue;
3384 dest_se_tpg = se_port->sep_tpg;
3385 if (!(dest_se_tpg))
3386 continue;
3387 dest_tf_ops = TPG_TFO(dest_se_tpg);
3388 if (!(dest_tf_ops))
3389 continue;
3390
3391 atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
3392 smp_mb__after_atomic_inc();
3393 spin_unlock(&dev->se_port_lock);
3394
3395 ret = core_scsi3_tpg_depend_item(dest_se_tpg);
3396 if (ret != 0) {
3397 printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
3398 " for dest_se_tpg\n");
3399 atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
3400 smp_mb__after_atomic_dec();
3401 core_scsi3_put_pr_reg(pr_reg);
3402 return PYX_TRANSPORT_LU_COMM_FAILURE;
3403 }
3404
3405 spin_lock(&dev->se_port_lock);
3406 break;
3407 }
3408 spin_unlock(&dev->se_port_lock);
3409
3410 if (!(dest_se_tpg) || (!dest_tf_ops)) {
3411 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3412 " fabric ops from Relative Target Port Identifier:"
3413 " %hu\n", rtpi);
3414 core_scsi3_put_pr_reg(pr_reg);
3415 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3416 }
3417 proto_ident = (buf[24] & 0x0f);
3418#if 0
3419 printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
3420 " 0x%02x\n", proto_ident);
3421#endif
3422 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
3423 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
3424 " proto_ident: 0x%02x does not match ident: 0x%02x"
3425 " from fabric: %s\n", proto_ident,
3426 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
3427 dest_tf_ops->get_fabric_name());
3428 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3429 goto out;
3430 }
3431 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
3432 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
3433 " containg a valid tpg_parse_pr_out_transport_id"
3434 " function pointer\n");
3435 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
3436 goto out;
3437 }
3438 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
3439 (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
3440 if (!(initiator_str)) {
3441 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3442 " initiator_str from Transport ID\n");
3443 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3444 goto out;
3445 }
3446
3447 printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
3448 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
3449 "port" : "device", initiator_str, (iport_ptr != NULL) ?
3450 iport_ptr : "");
3451 /*
3452 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
3453 * action specifies a TransportID that is the same as the initiator port
3454 * of the I_T nexus for the command received, then the command shall
3455 * be terminated with CHECK CONDITION status, with the sense key set to
3456 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
3457 * IN PARAMETER LIST.
3458 */
3459 pr_reg_nacl = pr_reg->pr_reg_nacl;
3460 matching_iname = (!strcmp(initiator_str,
3461 pr_reg_nacl->initiatorname)) ? 1 : 0;
3462 if (!(matching_iname))
3463 goto after_iport_check;
3464
3465 if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
3466 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
3467 " matches: %s on received I_T Nexus\n", initiator_str,
3468 pr_reg_nacl->initiatorname);
3469 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3470 goto out;
3471 }
3472 if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
3473 printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
3474 " matches: %s %s on received I_T Nexus\n",
3475 initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
3476 pr_reg->pr_reg_isid);
3477 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3478 goto out;
3479 }
3480after_iport_check:
3481 /*
3482 * Locate the destination struct se_node_acl from the received Transport ID
3483 */
3484 spin_lock_bh(&dest_se_tpg->acl_node_lock);
3485 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3486 initiator_str);
3487 if (dest_node_acl) {
3488 atomic_inc(&dest_node_acl->acl_pr_ref_count);
3489 smp_mb__after_atomic_inc();
3490 }
3491 spin_unlock_bh(&dest_se_tpg->acl_node_lock);
3492
3493 if (!(dest_node_acl)) {
3494 printk(KERN_ERR "Unable to locate %s dest_node_acl for"
3495 " TransportID%s\n", dest_tf_ops->get_fabric_name(),
3496 initiator_str);
3497 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3498 goto out;
3499 }
3500 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
3501 if (ret != 0) {
3502 printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for"
3503 " dest_node_acl\n");
3504 atomic_dec(&dest_node_acl->acl_pr_ref_count);
3505 smp_mb__after_atomic_dec();
3506 dest_node_acl = NULL;
3507 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
3508 goto out;
3509 }
3510#if 0
3511 printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
3512 " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
3513 dest_node_acl->initiatorname);
3514#endif
3515 /*
3516 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
3517 * PORT IDENTIFIER.
3518 */
3519 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
3520 if (!(dest_se_deve)) {
3521 printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
3522 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
3523 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3524 goto out;
3525 }
3526
3527 ret = core_scsi3_lunacl_depend_item(dest_se_deve);
3528 if (ret < 0) {
3529 printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
3530 atomic_dec(&dest_se_deve->pr_ref_count);
3531 smp_mb__after_atomic_dec();
3532 dest_se_deve = NULL;
3533 ret = PYX_TRANSPORT_LU_COMM_FAILURE;
3534 goto out;
3535 }
3536#if 0
3537 printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
3538 " ACL for dest_se_deve->mapped_lun: %u\n",
3539 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
3540 dest_se_deve->mapped_lun);
3541#endif
3542 /*
3543 * A persistent reservation needs to already existing in order to
3544 * successfully complete the REGISTER_AND_MOVE service action..
3545 */
3546 spin_lock(&dev->dev_reservation_lock);
3547 pr_res_holder = dev->dev_pr_res_holder;
3548 if (!(pr_res_holder)) {
3549 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
3550 " currently held\n");
3551 spin_unlock(&dev->dev_reservation_lock);
3552 ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
3553 goto out;
3554 }
3555 /*
3556 * The received on I_T Nexus must be the reservation holder.
3557 *
3558 * From spc4r17 section 5.7.8 Table 50 --
3559 * Register behaviors for a REGISTER AND MOVE service action
3560 */
3561 if (pr_res_holder != pr_reg) {
3562 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
3563 " Nexus is not reservation holder\n");
3564 spin_unlock(&dev->dev_reservation_lock);
3565 ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
3566 goto out;
3567 }
3568 /*
3569 * From spc4r17 section 5.7.8: registering and moving reservation
3570 *
3571 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
3572 * action is received and the established persistent reservation is a
3573 * Write Exclusive - All Registrants type or Exclusive Access -
3574 * All Registrants type reservation, then the command shall be completed
3575 * with RESERVATION CONFLICT status.
3576 */
3577 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
3578 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
3579 printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
3580 " reservation for type: %s\n",
3581 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
3582 spin_unlock(&dev->dev_reservation_lock);
3583 ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
3584 goto out;
3585 }
3586 pr_res_nacl = pr_res_holder->pr_reg_nacl;
3587 /*
3588 * b) Ignore the contents of the (received) SCOPE and TYPE fields;
3589 */
3590 type = pr_res_holder->pr_res_type;
3591 scope = pr_res_holder->pr_res_type;
3592 /*
3593 * c) Associate the reservation key specified in the SERVICE ACTION
3594 * RESERVATION KEY field with the I_T nexus specified as the
3595 * destination of the register and move, where:
3596 * A) The I_T nexus is specified by the TransportID and the
3597 * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
3598 * B) Regardless of the TransportID format used, the association for
3599 * the initiator port is based on either the initiator port name
3600 * (see 3.1.71) on SCSI transport protocols where port names are
3601 * required or the initiator port identifier (see 3.1.70) on SCSI
3602 * transport protocols where port names are not required;
3603 * d) Register the reservation key specified in the SERVICE ACTION
3604 * RESERVATION KEY field;
3605 * e) Retain the reservation key specified in the SERVICE ACTION
3606 * RESERVATION KEY field and associated information;
3607 *
3608 * Also, It is not an error for a REGISTER AND MOVE service action to
3609 * register an I_T nexus that is already registered with the same
3610 * reservation key or a different reservation key.
3611 */
3612 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
3613 iport_ptr);
3614 if (!(dest_pr_reg)) {
3615 ret = core_scsi3_alloc_registration(SE_DEV(cmd),
3616 dest_node_acl, dest_se_deve, iport_ptr,
3617 sa_res_key, 0, aptpl, 2, 1);
3618 if (ret != 0) {
3619 spin_unlock(&dev->dev_reservation_lock);
3620 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3621 goto out;
3622 }
3623 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
3624 iport_ptr);
3625 new_reg = 1;
3626 }
3627 /*
3628 * f) Release the persistent reservation for the persistent reservation
3629 * holder (i.e., the I_T nexus on which the
3630 */
3631 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
3632 dev->dev_pr_res_holder, 0);
3633 /*
3634 * g) Move the persistent reservation to the specified I_T nexus using
3635 * the same scope and type as the persistent reservation released in
3636 * item f); and
3637 */
3638 dev->dev_pr_res_holder = dest_pr_reg;
3639 dest_pr_reg->pr_res_holder = 1;
3640 dest_pr_reg->pr_res_type = type;
3641 pr_reg->pr_res_scope = scope;
3642 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
3643 PR_REG_ISID_ID_LEN);
3644 /*
3645 * Increment PRGeneration for existing registrations..
3646 */
3647 if (!(new_reg))
3648 dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
3649 spin_unlock(&dev->dev_reservation_lock);
3650
3651 printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
3652 " created new reservation holder TYPE: %s on object RTPI:"
3653 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
3654 core_scsi3_pr_dump_type(type), rtpi,
3655 dest_pr_reg->pr_res_generation);
3656 printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
3657 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
3658 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
3659 (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
3660 dest_node_acl->initiatorname, (iport_ptr != NULL) ?
3661 iport_ptr : "");
3662 /*
3663 * It is now safe to release configfs group dependencies for destination
3664 * of Transport ID Initiator Device/Port Identifier
3665 */
3666 core_scsi3_lunacl_undepend_item(dest_se_deve);
3667 core_scsi3_nodeacl_undepend_item(dest_node_acl);
3668 core_scsi3_tpg_undepend_item(dest_se_tpg);
3669 /*
3670 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
3671 * nexus on which PERSISTENT RESERVE OUT command was received.
3672 */
3673 if (unreg) {
3674 spin_lock(&pr_tmpl->registration_lock);
3675 __core_scsi3_free_registration(dev, pr_reg, NULL, 1);
3676 spin_unlock(&pr_tmpl->registration_lock);
3677 } else
3678 core_scsi3_put_pr_reg(pr_reg);
3679
3680 /*
3681 * Clear the APTPL metadata if APTPL has been disabled, otherwise
3682 * write out the updated metadata to struct file for this SCSI device.
3683 */
3684 if (!(aptpl)) {
3685 pr_tmpl->pr_aptpl_active = 0;
3686 core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
3687 printk("SPC-3 PR: Set APTPL Bit Deactivated for"
3688 " REGISTER_AND_MOVE\n");
3689 } else {
3690 pr_tmpl->pr_aptpl_active = 1;
3691 ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
3692 &dest_pr_reg->pr_aptpl_buf[0],
3693 pr_tmpl->pr_aptpl_buf_len);
3694 if (!(ret))
3695 printk("SPC-3 PR: Set APTPL Bit Activated for"
3696 " REGISTER_AND_MOVE\n");
3697 }
3698
3699 core_scsi3_put_pr_reg(dest_pr_reg);
3700 return 0;
3701out:
3702 if (dest_se_deve)
3703 core_scsi3_lunacl_undepend_item(dest_se_deve);
3704 if (dest_node_acl)
3705 core_scsi3_nodeacl_undepend_item(dest_node_acl);
3706 core_scsi3_tpg_undepend_item(dest_se_tpg);
3707 core_scsi3_put_pr_reg(pr_reg);
3708 return ret;
3709}
3710
/*
 * Extract an 8-byte big-endian reservation key from a PERSISTENT RESERVE
 * OUT parameter list buffer.
 *
 * @cdb: pointer to the first of the eight key bytes.
 *
 * Returns the key as a 64-bit value in host byte order.
 */
static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
{
	unsigned long long key = 0;
	int i;

	/*
	 * Accumulate byte-by-byte in unsigned 64-bit arithmetic.  The
	 * previous open-coded form evaluated (cdb[0] << 24): the byte is
	 * promoted to a signed int, and shifting a value with bit 7 set
	 * into the sign bit is undefined behavior in C.
	 */
	for (i = 0; i < 8; i++)
		key = (key << 8) | cdb[i];

	return key;
}
3720
3721/*
3722 * See spc4r17 section 6.14 Table 170
3723 */
3724static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
3725{
3726 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3727 u64 res_key, sa_res_key;
3728 int sa, scope, type, aptpl;
3729 int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
3730 /*
3731 * FIXME: A NULL struct se_session pointer means an this is not coming from
3732 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
3733 */
3734 if (!(SE_SESS(cmd)))
3735 return PYX_TRANSPORT_LU_COMM_FAILURE;
3736
3737 if (cmd->data_length < 24) {
3738 printk(KERN_WARNING "SPC-PR: Recieved PR OUT parameter list"
3739 " length too small: %u\n", cmd->data_length);
3740 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3741 }
3742 /*
3743 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
3744 */
3745 sa = (cdb[1] & 0x1f);
3746 scope = (cdb[2] & 0xf0);
3747 type = (cdb[2] & 0x0f);
3748 /*
3749 * From PERSISTENT_RESERVE_OUT parameter list (payload)
3750 */
3751 res_key = core_scsi3_extract_reservation_key(&buf[0]);
3752 sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
3753 /*
3754 * REGISTER_AND_MOVE uses a different SA parameter list containing
3755 * SCSI TransportIDs.
3756 */
3757 if (sa != PRO_REGISTER_AND_MOVE) {
3758 spec_i_pt = (buf[20] & 0x08);
3759 all_tg_pt = (buf[20] & 0x04);
3760 aptpl = (buf[20] & 0x01);
3761 } else {
3762 aptpl = (buf[17] & 0x01);
3763 unreg = (buf[17] & 0x02);
3764 }
3765 /*
3766 * SPEC_I_PT=1 is only valid for Service action: REGISTER
3767 */
3768 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
3769 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3770 /*
3771 * From spc4r17 section 6.14:
3772 *
3773 * If the SPEC_I_PT bit is set to zero, the service action is not
3774 * REGISTER AND MOVE, and the parameter list length is not 24, then
3775 * the command shall be terminated with CHECK CONDITION status, with
3776 * the sense key set to ILLEGAL REQUEST, and the additional sense
3777 * code set to PARAMETER LIST LENGTH ERROR.
3778 */
3779 if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
3780 (cmd->data_length != 24)) {
3781 printk(KERN_WARNING "SPC-PR: Recieved PR OUT illegal parameter"
3782 " list length: %u\n", cmd->data_length);
3783 return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
3784 }
3785 /*
3786 * (core_scsi3_emulate_pro_* function parameters
3787 * are defined by spc4r17 Table 174:
3788 * PERSISTENT_RESERVE_OUT service actions and valid parameters.
3789 */
3790 switch (sa) {
3791 case PRO_REGISTER:
3792 return core_scsi3_emulate_pro_register(cmd,
3793 res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
3794 case PRO_RESERVE:
3795 return core_scsi3_emulate_pro_reserve(cmd,
3796 type, scope, res_key);
3797 case PRO_RELEASE:
3798 return core_scsi3_emulate_pro_release(cmd,
3799 type, scope, res_key);
3800 case PRO_CLEAR:
3801 return core_scsi3_emulate_pro_clear(cmd, res_key);
3802 case PRO_PREEMPT:
3803 return core_scsi3_emulate_pro_preempt(cmd, type, scope,
3804 res_key, sa_res_key, 0);
3805 case PRO_PREEMPT_AND_ABORT:
3806 return core_scsi3_emulate_pro_preempt(cmd, type, scope,
3807 res_key, sa_res_key, 1);
3808 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
3809 return core_scsi3_emulate_pro_register(cmd,
3810 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
3811 case PRO_REGISTER_AND_MOVE:
3812 return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
3813 sa_res_key, aptpl, unreg);
3814 default:
3815 printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
3816 " action: 0x%02x\n", cdb[1] & 0x1f);
3817 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3818 }
3819
3820 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3821}
3822
3823/*
3824 * PERSISTENT_RESERVE_IN Service Action READ_KEYS
3825 *
3826 * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
3827 */
3828static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3829{
3830 struct se_device *se_dev = SE_DEV(cmd);
3831 struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
3832 struct t10_pr_registration *pr_reg;
3833 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3834 u32 add_len = 0, off = 8;
3835
3836 if (cmd->data_length < 8) {
3837 printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
3838 " too small\n", cmd->data_length);
3839 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3840 }
3841
3842 buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
3843 buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
3844 buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
3845 buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
3846
3847 spin_lock(&T10_RES(su_dev)->registration_lock);
3848 list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
3849 pr_reg_list) {
3850 /*
3851 * Check for overflow of 8byte PRI READ_KEYS payload and
3852 * next reservation key list descriptor.
3853 */
3854 if ((add_len + 8) > (cmd->data_length - 8))
3855 break;
3856
3857 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
3858 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
3859 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
3860 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
3861 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
3862 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
3863 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
3864 buf[off++] = (pr_reg->pr_res_key & 0xff);
3865
3866 add_len += 8;
3867 }
3868 spin_unlock(&T10_RES(su_dev)->registration_lock);
3869
3870 buf[4] = ((add_len >> 24) & 0xff);
3871 buf[5] = ((add_len >> 16) & 0xff);
3872 buf[6] = ((add_len >> 8) & 0xff);
3873 buf[7] = (add_len & 0xff);
3874
3875 return 0;
3876}
3877
/*
 * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
 *
 * Report the PRGENERATION counter and, when a persistent reservation is
 * currently held, the holder's reservation key plus the reservation
 * SCOPE and TYPE byte.
 *
 * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
 */
static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
{
	struct se_device *se_dev = SE_DEV(cmd);
	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
	struct t10_pr_registration *pr_reg;
	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
	u64 pr_res_key;
	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */

	if (cmd->data_length < 8) {
		printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
			" too small\n", cmd->data_length);
		return PYX_TRANSPORT_INVALID_CDB_FIELD;
	}

	/* PRGENERATION, big-endian in bytes 0-3. */
	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);

	spin_lock(&se_dev->dev_reservation_lock);
	pr_reg = se_dev->dev_pr_res_holder;
	if ((pr_reg)) {
		/*
		 * Set the hardcoded Additional Length
		 */
		buf[4] = ((add_len >> 24) & 0xff);
		buf[5] = ((add_len >> 16) & 0xff);
		buf[6] = ((add_len >> 8) & 0xff);
		buf[7] = (add_len & 0xff);

		/*
		 * The key/scope/type fields below extend through byte
		 * offset 21; bail out early if the initiator buffer is
		 * too short to hold them.  NOTE(review): 22 appears to
		 * be the minimum length needed to reach buf[21] — confirm
		 * against spc4r17 Table 161.
		 */
		if (cmd->data_length < 22) {
			spin_unlock(&se_dev->dev_reservation_lock);
			return 0;
		}
		/*
		 * Set the Reservation key.
		 *
		 * From spc4r17, section 5.7.10:
		 * A persistent reservation holder has its reservation key
		 * returned in the parameter data from a PERSISTENT
		 * RESERVE IN command with READ RESERVATION service action as
		 * follows:
		 * a) For a persistent reservation of the type Write Exclusive
		 *    - All Registrants or Exclusive Access - All Registrants,
		 *    the reservation key shall be set to zero; or
		 * b) For all other persistent reservation types, the
		 *    reservation key shall be set to the registered
		 *    reservation key for the I_T nexus that holds the
		 *    persistent reservation.
		 */
		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
			pr_res_key = 0;
		else
			pr_res_key = pr_reg->pr_res_key;

		/* RESERVATION KEY, big-endian in bytes 8-15. */
		buf[8] = ((pr_res_key >> 56) & 0xff);
		buf[9] = ((pr_res_key >> 48) & 0xff);
		buf[10] = ((pr_res_key >> 40) & 0xff);
		buf[11] = ((pr_res_key >> 32) & 0xff);
		buf[12] = ((pr_res_key >> 24) & 0xff);
		buf[13] = ((pr_res_key >> 16) & 0xff);
		buf[14] = ((pr_res_key >> 8) & 0xff);
		buf[15] = (pr_res_key & 0xff);
		/*
		 * Set the SCOPE and TYPE
		 */
		buf[21] = (pr_reg->pr_res_scope & 0xf0) |
			  (pr_reg->pr_res_type & 0x0f);
	}
	spin_unlock(&se_dev->dev_reservation_lock);

	return 0;
}
3958
3959/*
3960 * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
3961 *
3962 * See spc4r17 section 6.13.4 Table 165
3963 */
3964static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
3965{
3966 struct se_device *dev = SE_DEV(cmd);
3967 struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
3968 unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
3969 u16 add_len = 8; /* Hardcoded to 8. */
3970
3971 if (cmd->data_length < 6) {
3972 printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
3973 " %u too small\n", cmd->data_length);
3974 return PYX_TRANSPORT_INVALID_CDB_FIELD;
3975 }
3976
3977 buf[0] = ((add_len << 8) & 0xff);
3978 buf[1] = (add_len & 0xff);
3979 buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */
3980 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
3981 buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
3982 buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
3983 /*
3984 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
3985 * set the TMV: Task Mask Valid bit.
3986 */
3987 buf[3] |= 0x80;
3988 /*
3989 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
3990 */
3991 buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
3992 /*
3993 * PTPL_A: Persistence across Target Power Loss Active bit
3994 */
3995 if (pr_tmpl->pr_aptpl_active)
3996 buf[3] |= 0x01;
3997 /*
3998 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
3999 */
4000 buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
4001 buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
4002 buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
4003 buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
4004 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
4005 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
4006
4007 return 0;
4008}
4009
/*
 * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
 *
 * Emit one Full Status descriptor per active registration: reservation
 * key, ALL_TG_PT/PR_HOLDER flags, SCOPE/TYPE for the holder, the relative
 * target port identifier, and the fabric-specific TransportID.
 *
 * See spc4r17 section 6.13.5 Table 168 and 169
 */
static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
{
	struct se_device *se_dev = SE_DEV(cmd);
	struct se_node_acl *se_nacl;
	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
	struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
	u32 off = 8; /* off into first Full Status descriptor */
	int format_code = 0;

	if (cmd->data_length < 8) {
		printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
			" too small\n", cmd->data_length);
		return PYX_TRANSPORT_INVALID_CDB_FIELD;
	}

	/* PRGENERATION, big-endian in bytes 0-3. */
	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);

	spin_lock(&pr_tmpl->registration_lock);
	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
			&pr_tmpl->registration_list, pr_reg_list) {

		se_nacl = pr_reg->pr_reg_nacl;
		se_tpg = pr_reg->pr_reg_nacl->se_tpg;
		add_desc_len = 0;

		/*
		 * Pin this registration via pr_res_holders so it cannot be
		 * freed while registration_lock is dropped for the fabric
		 * callouts below; the _safe iterator tolerates concurrent
		 * list changes while the lock is released.
		 */
		atomic_inc(&pr_reg->pr_res_holders);
		smp_mb__after_atomic_inc();
		spin_unlock(&pr_tmpl->registration_lock);
		/*
		 * Determine expected length of $FABRIC_MOD specific
		 * TransportID full status descriptor..
		 */
		exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
				se_tpg, se_nacl, pr_reg, &format_code);

		if ((exp_desc_len + add_len) > cmd->data_length) {
			printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
				" out of buffer: %d\n", cmd->data_length);
			/* Drop the pin taken above before bailing out. */
			spin_lock(&pr_tmpl->registration_lock);
			atomic_dec(&pr_reg->pr_res_holders);
			smp_mb__after_atomic_dec();
			break;
		}
		/*
		 * Set RESERVATION KEY (big-endian, 8 bytes)
		 */
		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
		buf[off++] = (pr_reg->pr_res_key & 0xff);
		off += 4; /* Skip Over Reserved area */

		/*
		 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
		 */
		if (pr_reg->pr_reg_all_tg_pt)
			buf[off] = 0x02;
		/*
		 * The struct se_lun pointer will be present for the
		 * reservation holder for PR_HOLDER bit.
		 *
		 * Also, if this registration is the reservation
		 * holder, fill in SCOPE and TYPE in the next byte.
		 */
		if (pr_reg->pr_res_holder) {
			buf[off++] |= 0x01;
			buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
				(pr_reg->pr_res_type & 0x0f);
		} else
			off += 2;

		off += 4; /* Skip over reserved area */
		/*
		 * From spc4r17 6.3.15:
		 *
		 * If the ALL_TG_PT bit set to zero, the RELATIVE TARGET PORT
		 * IDENTIFIER field contains the relative port identifier (see
		 * 3.1.120) of the target port that is part of the I_T nexus
		 * described by this full status descriptor. If the ALL_TG_PT
		 * bit is set to one, the contents of the RELATIVE TARGET PORT
		 * IDENTIFIER field are not defined by this standard.
		 */
		if (!(pr_reg->pr_reg_all_tg_pt)) {
			struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;

			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
			buf[off++] = (port->sep_rtpi & 0xff);
		} else
			off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */

		/*
		 * Now, have the $FABRIC_MOD fill in the protocol identifier
		 */
		desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
				se_nacl, pr_reg, &format_code, &buf[off+4]);

		/* Re-take the lock and release the per-registration pin. */
		spin_lock(&pr_tmpl->registration_lock);
		atomic_dec(&pr_reg->pr_res_holders);
		smp_mb__after_atomic_dec();
		/*
		 * Set the ADDITIONAL DESCRIPTOR LENGTH
		 */
		buf[off++] = ((desc_len >> 24) & 0xff);
		buf[off++] = ((desc_len >> 16) & 0xff);
		buf[off++] = ((desc_len >> 8) & 0xff);
		buf[off++] = (desc_len & 0xff);
		/*
		 * Size of full descriptor header minus TransportID
		 * containing $FABRIC_MOD specific) initiator device/port
		 * WWN information.
		 *
		 * See spc4r17 Section 6.13.5 Table 169
		 */
		add_desc_len = (24 + desc_len);

		off += desc_len;
		add_len += add_desc_len;
	}
	spin_unlock(&pr_tmpl->registration_lock);
	/*
	 * Set ADDITIONAL_LENGTH
	 */
	buf[4] = ((add_len >> 24) & 0xff);
	buf[5] = ((add_len >> 16) & 0xff);
	buf[6] = ((add_len >> 8) & 0xff);
	buf[7] = (add_len & 0xff);

	return 0;
}
4155
4156static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
4157{
4158 switch (cdb[1] & 0x1f) {
4159 case PRI_READ_KEYS:
4160 return core_scsi3_pri_read_keys(cmd);
4161 case PRI_READ_RESERVATION:
4162 return core_scsi3_pri_read_reservation(cmd);
4163 case PRI_REPORT_CAPABILITIES:
4164 return core_scsi3_pri_report_capabilities(cmd);
4165 case PRI_READ_FULL_STATUS:
4166 return core_scsi3_pri_read_full_status(cmd);
4167 default:
4168 printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
4169 " action: 0x%02x\n", cdb[1] & 0x1f);
4170 return PYX_TRANSPORT_INVALID_CDB_FIELD;
4171 }
4172
4173}
4174
4175int core_scsi3_emulate_pr(struct se_cmd *cmd)
4176{
4177 unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
4178 struct se_device *dev = cmd->se_dev;
4179 /*
4180 * Following spc2r20 5.5.1 Reservations overview:
4181 *
4182 * If a logical unit has been reserved by any RESERVE command and is
4183 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
4184 * PERSISTENT RESERVE OUT commands shall conflict regardless of
4185 * initiator or service action and shall terminate with a RESERVATION
4186 * CONFLICT status.
4187 */
4188 if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
4189 printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
4190 " SPC-2 reservation is held, returning"
4191 " RESERVATION_CONFLICT\n");
4192 return PYX_TRANSPORT_RESERVATION_CONFLICT;
4193 }
4194
4195 return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
4196 core_scsi3_emulate_pr_out(cmd, cdb) :
4197 core_scsi3_emulate_pr_in(cmd, cdb);
4198}
4199
/*
 * SPC_PASSTHROUGH stub installed by core_setup_reservations(): the
 * underlying SCSI hardware handles reservations, so always report no
 * conflict (0) here.
 */
static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
{
	return 0;
}
4204
/*
 * SPC_PASSTHROUGH stub for the sequence/non-holder check installed by
 * core_setup_reservations(): always allow the command (return 0), since
 * reservation enforcement is left to the underlying SCSI hardware.
 */
static int core_pt_seq_non_holder(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u32 pr_reg_type)
{
	return 0;
}
4212
/*
 * Select the reservations implementation for a struct se_device and wire
 * up the t10_pr_ops function pointers accordingly.
 *
 * @dev:      device being configured.
 * @force_pt: when non-zero, force SPC_PASSTHROUGH regardless of transport
 *            type or emulation attributes.
 *
 * Always returns 0.
 */
int core_setup_reservations(struct se_device *dev, int force_pt)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_reservation_template *rest = &su_dev->t10_reservation;
	/*
	 * If this device is from Target_Core_Mod/pSCSI, use the reservations
	 * of the underlying SCSI hardware.  In Linux/SCSI terms, this can
	 * cause a problem because libata and some SATA RAID HBAs appear
	 * under Linux/SCSI, but need to emulate reservations themselves.
	 */
	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
	    !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
		rest->res_type = SPC_PASSTHROUGH;
		rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
		rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
		printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
			" emulation\n", TRANSPORT(dev)->name);
		return 0;
	}
	/*
	 * If SPC-3 or above is reported by real or emulated struct se_device,
	 * use emulated Persistent Reservations.
	 */
	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
		rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
		rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
		rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
		printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
			" emulation\n", TRANSPORT(dev)->name);
	} else {
		/* SPC-2 and below: legacy RESERVE/RELEASE emulation. */
		rest->res_type = SPC2_RESERVATIONS;
		rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
		rest->pr_ops.t10_seq_non_holder =
				&core_scsi2_reservation_seq_non_holder;
		printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
			TRANSPORT(dev)->name);
	}

	return 0;
}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
new file mode 100644
index 000000000000..5603bcfd86d3
--- /dev/null
+++ b/drivers/target/target_core_pr.h
@@ -0,0 +1,67 @@
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
/*
 * PERSISTENT_RESERVE_OUT service action codes
 *
 * spc4r17 section 6.14.2 Table 171
 */
#define PRO_REGISTER				0x00
#define PRO_RESERVE				0x01
#define PRO_RELEASE				0x02
#define PRO_CLEAR				0x03
#define PRO_PREEMPT				0x04
#define PRO_PREEMPT_AND_ABORT			0x05
#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY	0x06
#define PRO_REGISTER_AND_MOVE			0x07
/*
 * PERSISTENT_RESERVE_IN service action codes
 *
 * spc4r17 section 6.13.1 Table 159
 */
#define PRI_READ_KEYS				0x00
#define PRI_READ_RESERVATION			0x01
#define PRI_REPORT_CAPABILITIES			0x02
#define PRI_READ_FULL_STATUS			0x03
/*
 * PERSISTENT_RESERVE_ SCOPE field
 *
 * spc4r17 section 6.13.3.3 Table 163
 */
#define PR_SCOPE_LU_SCOPE			0x00
/*
 * PERSISTENT_RESERVE_* TYPE field
 *
 * spc4r17 section 6.13.3.4 Table 164
 */
#define PR_TYPE_WRITE_EXCLUSIVE			0x01
#define PR_TYPE_EXCLUSIVE_ACCESS		0x03
#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY		0x05
#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY	0x06
#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG		0x07
#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG		0x08

/* Maximum initiator/target port name lengths for APTPL metadata. */
#define PR_APTPL_MAX_IPORT_LEN			256
#define PR_APTPL_MAX_TPORT_LEN			256

extern struct kmem_cache *t10_pr_reg_cache;

/*
 * Public interface exported by the persistent reservations emulation
 * (target_core_pr.c).
 */
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
			char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
			struct t10_reservation_template *, u64,
			unsigned char *, unsigned char *, u32,
			unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
			struct se_portal_group *, struct se_lun *,
			struct se_lun_acl *);
extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
			struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
			struct se_cmd *);
extern int core_scsi3_emulate_pr(struct se_cmd *);
extern int core_setup_reservations(struct se_device *, int);

#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 000000000000..742d24609a9b
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1470 @@
1/*******************************************************************************
2 * Filename: target_core_pscsi.c
3 *
4 * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
5 *
6 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/version.h>
30#include <linux/string.h>
31#include <linux/parser.h>
32#include <linux/timer.h>
33#include <linux/blkdev.h>
34#include <linux/blk_types.h>
35#include <linux/slab.h>
36#include <linux/spinlock.h>
37#include <linux/smp_lock.h>
38#include <linux/genhd.h>
39#include <linux/cdrom.h>
40#include <linux/file.h>
41#include <scsi/scsi.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_host.h>
45#include <scsi/libsas.h> /* For TASK_ATTR_* */
46
47#include <target/target_core_base.h>
48#include <target/target_core_device.h>
49#include <target/target_core_transport.h>
50
51#include "target_core_pscsi.h"
52
/*
 * True when byte @a is in the printable ASCII range (0x20 ' ' .. 0x7e '~').
 * Arguments are fully parenthesized so expression arguments such as
 * ISPRINT(c + 1) expand with the intended precedence.
 */
#define ISPRINT(a) (((a) >= ' ') && ((a) <= '~'))
54
55static struct se_subsystem_api pscsi_template;
56
57static void pscsi_req_done(struct request *, int);
58
59/* pscsi_get_sh():
60 *
61 *
62 */
63static struct Scsi_Host *pscsi_get_sh(u32 host_no)
64{
65 struct Scsi_Host *sh = NULL;
66
67 sh = scsi_host_lookup(host_no);
68 if (IS_ERR(sh)) {
69 printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
70 " %u\n", host_no);
71 return NULL;
72 }
73
74 return sh;
75}
76
77/* pscsi_attach_hba():
78 *
79 * pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host.
80 * from the passed SCSI Host ID.
81 */
82static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
83{
84 int hba_depth;
85 struct pscsi_hba_virt *phv;
86
87 phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
88 if (!(phv)) {
89 printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
90 return -1;
91 }
92 phv->phv_host_id = host_id;
93 phv->phv_mode = PHV_VIRUTAL_HOST_ID;
94 hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
95 atomic_set(&hba->left_queue_depth, hba_depth);
96 atomic_set(&hba->max_queue_depth, hba_depth);
97
98 hba->hba_ptr = (void *)phv;
99
100 printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
101 " Generic Target Core Stack %s\n", hba->hba_id,
102 PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
103 printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
104 " Target Core with TCQ Depth: %d\n", hba->hba_id,
105 atomic_read(&hba->max_queue_depth));
106
107 return 0;
108}
109
110static void pscsi_detach_hba(struct se_hba *hba)
111{
112 struct pscsi_hba_virt *phv = hba->hba_ptr;
113 struct Scsi_Host *scsi_host = phv->phv_lld_host;
114
115 if (scsi_host) {
116 scsi_host_put(scsi_host);
117
118 printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
119 " Generic Target Core\n", hba->hba_id,
120 (scsi_host->hostt->name) ? (scsi_host->hostt->name) :
121 "Unknown");
122 } else
123 printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
124 " from Generic Target Core\n", hba->hba_id);
125
126 kfree(phv);
127 hba->hba_ptr = NULL;
128}
129
130static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
131{
132 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
133 struct Scsi_Host *sh = phv->phv_lld_host;
134 int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
135 /*
136 * Release the struct Scsi_Host
137 */
138 if (!(mode_flag)) {
139 if (!(sh))
140 return 0;
141
142 phv->phv_lld_host = NULL;
143 phv->phv_mode = PHV_VIRUTAL_HOST_ID;
144 atomic_set(&hba->left_queue_depth, hba_depth);
145 atomic_set(&hba->max_queue_depth, hba_depth);
146
147 printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
148 " %s\n", hba->hba_id, (sh->hostt->name) ?
149 (sh->hostt->name) : "Unknown");
150
151 scsi_host_put(sh);
152 return 0;
153 }
154 /*
155 * Otherwise, locate struct Scsi_Host from the original passed
156 * pSCSI Host ID and enable for phba mode
157 */
158 sh = pscsi_get_sh(phv->phv_host_id);
159 if (!(sh)) {
160 printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
161 " phv_host_id: %d\n", phv->phv_host_id);
162 return -1;
163 }
164 /*
165 * Usually the SCSI LLD will use the hostt->can_queue value to define
166 * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
167 * this at all and set sh->can_queue at runtime.
168 */
169 hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
170 sh->hostt->can_queue : sh->can_queue;
171
172 atomic_set(&hba->left_queue_depth, hba_depth);
173 atomic_set(&hba->max_queue_depth, hba_depth);
174
175 phv->phv_lld_host = sh;
176 phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
177
178 printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
179 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
180
181 return 1;
182}
183
184static void pscsi_tape_read_blocksize(struct se_device *dev,
185 struct scsi_device *sdev)
186{
187 unsigned char cdb[MAX_COMMAND_SIZE], *buf;
188 int ret;
189
190 buf = kzalloc(12, GFP_KERNEL);
191 if (!buf)
192 return;
193
194 memset(cdb, 0, MAX_COMMAND_SIZE);
195 cdb[0] = MODE_SENSE;
196 cdb[4] = 0x0c; /* 12 bytes */
197
198 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
199 HZ, 1, NULL);
200 if (ret)
201 goto out_free;
202
203 /*
204 * If MODE_SENSE still returns zero, set the default value to 1024.
205 */
206 sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
207 if (!sdev->sector_size)
208 sdev->sector_size = 1024;
209out_free:
210 kfree(buf);
211}
212
213static void
214pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
215{
216 unsigned char *buf;
217
218 if (sdev->inquiry_len < INQUIRY_LEN)
219 return;
220
221 buf = sdev->inquiry;
222 if (!buf)
223 return;
224 /*
225 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
226 */
227 memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
228 memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
229 memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
230}
231
232static int
233pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
234{
235 unsigned char cdb[MAX_COMMAND_SIZE], *buf;
236 int ret;
237
238 buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
239 if (!buf)
240 return -1;
241
242 memset(cdb, 0, MAX_COMMAND_SIZE);
243 cdb[0] = INQUIRY;
244 cdb[1] = 0x01; /* Query VPD */
245 cdb[2] = 0x80; /* Unit Serial Number */
246 cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
247 cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
248
249 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
250 INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
251 if (ret)
252 goto out_free;
253
254 snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
255
256 wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
257
258 kfree(buf);
259 return 0;
260
261out_free:
262 kfree(buf);
263 return -1;
264}
265
266static void
267pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
268 struct t10_wwn *wwn)
269{
270 unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
271 int ident_len, page_len, off = 4, ret;
272 struct t10_vpd *vpd;
273
274 buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
275 if (!buf)
276 return;
277
278 memset(cdb, 0, MAX_COMMAND_SIZE);
279 cdb[0] = INQUIRY;
280 cdb[1] = 0x01; /* Query VPD */
281 cdb[2] = 0x83; /* Device Identifier */
282 cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
283 cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
284
285 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
286 INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
287 NULL, HZ, 1, NULL);
288 if (ret)
289 goto out;
290
291 page_len = (buf[2] << 8) | buf[3];
292 while (page_len > 0) {
293 /* Grab a pointer to the Identification descriptor */
294 page_83 = &buf[off];
295 ident_len = page_83[3];
296 if (!ident_len) {
297 printk(KERN_ERR "page_83[3]: identifier"
298 " length zero!\n");
299 break;
300 }
301 printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
302
303 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
304 if (!vpd) {
305 printk(KERN_ERR "Unable to allocate memory for"
306 " struct t10_vpd\n");
307 goto out;
308 }
309 INIT_LIST_HEAD(&vpd->vpd_list);
310
311 transport_set_vpd_proto_id(vpd, page_83);
312 transport_set_vpd_assoc(vpd, page_83);
313
314 if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
315 off += (ident_len + 4);
316 page_len -= (ident_len + 4);
317 kfree(vpd);
318 continue;
319 }
320 if (transport_set_vpd_ident(vpd, page_83) < 0) {
321 off += (ident_len + 4);
322 page_len -= (ident_len + 4);
323 kfree(vpd);
324 continue;
325 }
326
327 list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
328 off += (ident_len + 4);
329 page_len -= (ident_len + 4);
330 }
331
332out:
333 kfree(buf);
334}
335
/* pscsi_add_device_to_list():
 *
 * Register a referenced struct scsi_device with the target core as a
 * struct se_device.  Fills a local se_dev_limits from the backing
 * request_queue limits, caches standard INQUIRY / VPD data, and (for
 * TYPE_TAPE) probes the media blocksize.  Returns the new se_device
 * or NULL on failure (in which case pdv->pdv_sd is cleared again).
 */
static struct se_device *pscsi_add_device_to_list(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	struct pscsi_dev_virt *pdv,
	struct scsi_device *sd,
	int dev_flags)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct request_queue *q;
	struct queue_limits *limits;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	/* A zero queue_depth would stall I/O; substitute the pSCSI default. */
	if (!sd->queue_depth) {
		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;

		printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
			" queue_depth to %d\n", sd->channel, sd->id,
				sd->lun, sd->queue_depth);
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 * Sector counts are clamped to the smaller of the host and queue limits.
	 */
	q = sd->request_queue;
	limits = &dev_limits.limits;
	limits->logical_block_size = sd->sector_size;
	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
				  queue_max_hw_sectors(q) : sd->host->max_sectors;
	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
				  queue_max_sectors(q) : sd->host->max_sectors;
	dev_limits.hw_queue_depth = sd->queue_depth;
	dev_limits.queue_depth = sd->queue_depth;
	/*
	 * Setup our standard INQUIRY info into se_dev->t10_wwn
	 */
	pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);

	/*
	 * Set the pointer pdv->pdv_sd to from passed struct scsi_device,
	 * which has already been referenced with Linux SCSI code with
	 * scsi_device_get() in this file's pscsi_create_virtdevice().
	 *
	 * The passthrough operations called by the transport_add_device_*
	 * function below will require this pointer to be set for passthroug
	 * ops.
	 *
	 * For the shutdown case in pscsi_free_device(), this struct
	 * scsi_device reference is released with Linux SCSI code
	 * scsi_device_put() and the pdv->pdv_sd cleared.
	 */
	pdv->pdv_sd = sd;

	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
				se_dev, dev_flags, (void *)pdv,
				&dev_limits, NULL, NULL);
	if (!(dev)) {
		pdv->pdv_sd = NULL;
		return NULL;
	}

	/*
	 * Locate VPD WWN Information used for various purposes within
	 * the Storage Engine.
	 */
	if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
		/*
		 * If VPD Unit Serial returned GOOD status, try
		 * VPD Device Identification page (0x83).
		 */
		pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
	}

	/*
	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
	 */
	if (sd->type == TYPE_TAPE)
		pscsi_tape_read_blocksize(dev, sd);
	return dev;
}
421
422static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
423{
424 struct pscsi_dev_virt *pdv;
425
426 pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
427 if (!(pdv)) {
428 printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
429 return NULL;
430 }
431 pdv->pdv_se_hba = hba;
432
433 printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
434 return (void *)pdv;
435}
436
437/*
438 * Called with struct Scsi_Host->host_lock called.
439 */
440static struct se_device *pscsi_create_type_disk(
441 struct scsi_device *sd,
442 struct pscsi_dev_virt *pdv,
443 struct se_subsystem_dev *se_dev,
444 struct se_hba *hba)
445{
446 struct se_device *dev;
447 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
448 struct Scsi_Host *sh = sd->host;
449 struct block_device *bd;
450 u32 dev_flags = 0;
451
452 if (scsi_device_get(sd)) {
453 printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
454 sh->host_no, sd->channel, sd->id, sd->lun);
455 spin_unlock_irq(sh->host_lock);
456 return NULL;
457 }
458 spin_unlock_irq(sh->host_lock);
459 /*
460 * Claim exclusive struct block_device access to struct scsi_device
461 * for TYPE_DISK using supplied udev_path
462 */
463 bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
464 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
465 if (!(bd)) {
466 printk("pSCSI: blkdev_get_by_path() failed\n");
467 scsi_device_put(sd);
468 return NULL;
469 }
470 pdv->pdv_bd = bd;
471
472 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
473 if (!(dev)) {
474 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
475 scsi_device_put(sd);
476 return NULL;
477 }
478 printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
479 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
480
481 return dev;
482}
483
484/*
485 * Called with struct Scsi_Host->host_lock called.
486 */
487static struct se_device *pscsi_create_type_rom(
488 struct scsi_device *sd,
489 struct pscsi_dev_virt *pdv,
490 struct se_subsystem_dev *se_dev,
491 struct se_hba *hba)
492{
493 struct se_device *dev;
494 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
495 struct Scsi_Host *sh = sd->host;
496 u32 dev_flags = 0;
497
498 if (scsi_device_get(sd)) {
499 printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
500 sh->host_no, sd->channel, sd->id, sd->lun);
501 spin_unlock_irq(sh->host_lock);
502 return NULL;
503 }
504 spin_unlock_irq(sh->host_lock);
505
506 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
507 if (!(dev)) {
508 scsi_device_put(sd);
509 return NULL;
510 }
511 printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
512 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
513 sd->channel, sd->id, sd->lun);
514
515 return dev;
516}
517
518/*
519 *Called with struct Scsi_Host->host_lock called.
520 */
521static struct se_device *pscsi_create_type_other(
522 struct scsi_device *sd,
523 struct pscsi_dev_virt *pdv,
524 struct se_subsystem_dev *se_dev,
525 struct se_hba *hba)
526{
527 struct se_device *dev;
528 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
529 struct Scsi_Host *sh = sd->host;
530 u32 dev_flags = 0;
531
532 spin_unlock_irq(sh->host_lock);
533 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
534 if (!(dev))
535 return NULL;
536
537 printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
538 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
539 sd->channel, sd->id, sd->lun);
540
541 return dev;
542}
543
/*
 * pscsi_create_virtdevice - enable a configured TCM/pSCSI device.
 *
 * Resolves the struct Scsi_Host to use (virtual host ID, legacy HBA
 * passthrough mode, or an already-bound LLD host), then walks the
 * host's device list for the configured channel/target/LUN and hands
 * the matching scsi_device to the type-specific create routine (which
 * releases host_lock).  Returns the new se_device, or NULL on failure
 * after undoing any host reference / legacy HBA mode taken here.
 */
static struct se_device *pscsi_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
	struct se_device *dev;
	struct scsi_device *sd;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	/* Set when pscsi_pmode_enable_hba() is invoked below, for unwind. */
	int legacy_mode_enable = 0;

	if (!(pdv)) {
		printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
				" parameter\n");
		return NULL;
	}
	/*
	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
	 */
	if (!(sh)) {
		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
			printk(KERN_ERR "pSCSI: Unable to locate struct"
				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
			return NULL;
		}
		/*
		 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
		 * reference, we enforce that udev_path has been set
		 */
		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
			printk(KERN_ERR "pSCSI: udev_path attribute has not"
				" been set before ENABLE=1\n");
			return NULL;
		}
		/*
		 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
		 * use the original TCM hba ID to reference Linux/SCSI Host No
		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
		 */
		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
			/* Legacy HBA mode may only be entered with no active devices. */
			spin_lock(&hba->device_lock);
			if (!(list_empty(&hba->hba_dev_list))) {
				printk(KERN_ERR "pSCSI: Unable to set hba_mode"
					" with active devices\n");
				spin_unlock(&hba->device_lock);
				return NULL;
			}
			spin_unlock(&hba->device_lock);

			if (pscsi_pmode_enable_hba(hba, 1) != 1)
				return NULL;

			legacy_mode_enable = 1;
			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
			sh = phv->phv_lld_host;
		} else {
			/* Takes a scsi_host reference; dropped on the failure paths below. */
			sh = pscsi_get_sh(pdv->pdv_host_id);
			if (!(sh)) {
				printk(KERN_ERR "pSCSI: Unable to locate"
					" pdv_host_id: %d\n", pdv->pdv_host_id);
				return NULL;
			}
		}
	} else {
		if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
			printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
				" struct Scsi_Host exists\n");
			return NULL;
		}
	}

	spin_lock_irq(sh->host_lock);
	list_for_each_entry(sd, &sh->__devices, siblings) {
		if ((pdv->pdv_channel_id != sd->channel) ||
		    (pdv->pdv_target_id != sd->id) ||
		    (pdv->pdv_lun_id != sd->lun))
			continue;
		/*
		 * Functions will release the held struct scsi_host->host_lock
		 * before calling calling pscsi_add_device_to_list() to register
		 * struct scsi_device with target_core_mod.
		 */
		switch (sd->type) {
		case TYPE_DISK:
			dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
			break;
		case TYPE_ROM:
			dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
			break;
		default:
			dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
			break;
		}

		/* On failure, undo the host reference or legacy HBA mode. */
		if (!(dev)) {
			if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
				scsi_host_put(sh);
			else if (legacy_mode_enable) {
				pscsi_pmode_enable_hba(hba, 0);
				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
			}
			pdv->pdv_sd = NULL;
			return NULL;
		}
		return dev;
	}
	spin_unlock_irq(sh->host_lock);

	printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);

	/* No matching device found: undo host reference / legacy HBA mode. */
	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
		scsi_host_put(sh);
	else if (legacy_mode_enable) {
		pscsi_pmode_enable_hba(hba, 0);
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
	}

	return NULL;
}
666
/* pscsi_free_device(): (Part of se_subsystem_api_t template)
 *
 * Undo pscsi_create_virtdevice(): drop the TYPE_DISK block_device
 * claim, the HBA-mode Scsi_Host reference, and the scsi_device
 * reference (TYPE_DISK/TYPE_ROM only took one), then free the
 * pscsi_dev_virt itself.
 */
static void pscsi_free_device(void *p)
{
	struct pscsi_dev_virt *pdv = p;
	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	if (sd) {
		/*
		 * Release exclusive pSCSI internal struct block_device claim for
		 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
		 */
		if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
			blkdev_put(pdv->pdv_bd,
				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
			pdv->pdv_bd = NULL;
		}
		/*
		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
		 * to struct Scsi_Host now.
		 */
		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
		    (phv->phv_lld_host != NULL))
			scsi_host_put(phv->phv_lld_host);

		/* Only the disk/rom create paths called scsi_device_get(). */
		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
			scsi_device_put(sd);

		pdv->pdv_sd = NULL;
	}

	kfree(pdv);
}
703
/* Map an embedded struct se_task back to its containing pscsi_plugin_task. */
static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
{
	return container_of(task, struct pscsi_plugin_task, pscsi_task);
}
708
709
/* pscsi_transport_complete():
 *
 * Post-process a completed pSCSI request: force the Write-Protect bit
 * into MODE_SENSE response data for read-only LUN mappings, capture
 * the negotiated blocksize from a successful TYPE_TAPE MODE_SELECT,
 * and return 1 when CHECK_CONDITION sense data is pending, 0 otherwise.
 */
static int pscsi_transport_complete(struct se_task *task)
{
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	struct scsi_device *sd = pdv->pdv_sd;
	int result;
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	unsigned char *cdb = &pt->pscsi_cdb[0];

	result = pt->pscsi_result;
	/*
	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
	 * forced.
	 */
	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
		if (!TASK_CMD(task)->se_deve)
			goto after_mode_sense;

		if (TASK_CMD(task)->se_deve->lun_flags &
				TRANSPORT_LUNFLAGS_READ_ONLY) {
			unsigned char *buf = (unsigned char *)
				T_TASK(task->task_se_cmd)->t_task_buf;

			/* WP bit lives in byte 3 (MODE_SENSE_10) or byte 2 (MODE_SENSE). */
			if (cdb[0] == MODE_SENSE_10) {
				if (!(buf[3] & 0x80))
					buf[3] |= 0x80;
			} else {
				if (!(buf[2] & 0x80))
					buf[2] |= 0x80;
			}
		}
	}
after_mode_sense:

	if (sd->type != TYPE_TAPE)
		goto after_mode_select;

	/*
	 * Hack to correctly obtain the initiator requested blocksize for
	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
	 * struct scsi_device->sector_size will not contain the correct value
	 * by default, so we go ahead and set it so
	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
	 * storage engine.
	 */
	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
		unsigned char *buf;
		struct scatterlist *sg = task->task_sg;
		u16 bdl;
		u32 blocksize;

		buf = sg_virt(&sg[0]);
		if (!(buf)) {
			printk(KERN_ERR "Unable to get buf for scatterlist\n");
			goto after_mode_select;
		}

		/* Block Descriptor Length: byte 3 (6-byte CDB) or bytes 6-7 (10-byte). */
		if (cdb[0] == MODE_SELECT)
			bdl = (buf[3]);
		else
			bdl = (buf[6] << 8) | (buf[7]);

		/* No block descriptor means no blocksize to pick up. */
		if (!bdl)
			goto after_mode_select;

		/* Blocksize is the last 3 bytes of the first block descriptor. */
		if (cdb[0] == MODE_SELECT)
			blocksize = (buf[9] << 16) | (buf[10] << 8) |
					(buf[11]);
		else
			blocksize = (buf[13] << 16) | (buf[14] << 8) |
					(buf[15]);

		sd->sector_size = blocksize;
	}
after_mode_select:

	/* Non-zero tells the core to fetch sense data from pt->pscsi_sense. */
	if (status_byte(result) & CHECK_CONDITION)
		return 1;

	return 0;
}
796
797static struct se_task *
798pscsi_alloc_task(struct se_cmd *cmd)
799{
800 struct pscsi_plugin_task *pt;
801 unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
802
803 pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
804 if (!pt) {
805 printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
806 return NULL;
807 }
808
809 /*
810 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
811 * allocate the extended CDB buffer for per struct se_task context
812 * pt->pscsi_cdb now.
813 */
814 if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
815
816 pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
817 if (!(pt->pscsi_cdb)) {
818 printk(KERN_ERR "pSCSI: Unable to allocate extended"
819 " pt->pscsi_cdb\n");
820 return NULL;
821 }
822 } else
823 pt->pscsi_cdb = &pt->__pscsi_cdb[0];
824
825 return &pt->pscsi_task;
826}
827
828static inline void pscsi_blk_init_request(
829 struct se_task *task,
830 struct pscsi_plugin_task *pt,
831 struct request *req,
832 int bidi_read)
833{
834 /*
835 * Defined as "scsi command" in include/linux/blkdev.h.
836 */
837 req->cmd_type = REQ_TYPE_BLOCK_PC;
838 /*
839 * For the extra BIDI-COMMAND READ struct request we do not
840 * need to setup the remaining structure members
841 */
842 if (bidi_read)
843 return;
844 /*
845 * Setup the done function pointer for struct request,
846 * also set the end_io_data pointer.to struct se_task.
847 */
848 req->end_io = pscsi_req_done;
849 req->end_io_data = (void *)task;
850 /*
851 * Load the referenced struct se_task's SCSI CDB into
852 * include/linux/blkdev.h:struct request->cmd
853 */
854 req->cmd_len = scsi_command_size(pt->pscsi_cdb);
855 req->cmd = &pt->pscsi_cdb[0];
856 /*
857 * Setup pointer for outgoing sense data.
858 */
859 req->sense = (void *)&pt->pscsi_sense[0];
860 req->sense_len = 0;
861}
862
863/*
864 * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
865*/
866static int pscsi_blk_get_request(struct se_task *task)
867{
868 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
869 struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
870
871 pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
872 (task->task_data_direction == DMA_TO_DEVICE),
873 GFP_KERNEL);
874 if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
875 printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
876 IS_ERR(pt->pscsi_req));
877 return PYX_TRANSPORT_LU_COMM_FAILURE;
878 }
879 /*
880 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
881 * and setup rq callback, CDB and sense.
882 */
883 pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
884 return 0;
885}
886
/* pscsi_do_task(): (Part of se_subsystem_api_t template)
 *
 * Submit the task's prepared struct request to the underlying
 * scsi_device's request_queue.  Completion is reported asynchronously
 * through pscsi_req_done(), so this always returns
 * PYX_TRANSPORT_SENT_TO_TRANSPORT.
 */
static int pscsi_do_task(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	/*
	 * Set the struct request->timeout value based on peripheral
	 * device type from SCSI.
	 */
	if (pdv->pdv_sd->type == TYPE_DISK)
		pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
	else
		pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;

	pt->pscsi_req->retries = PS_RETRY;
	/*
	 * Queue the struct request into the struct scsi_device->request_queue.
	 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
	 * descriptor
	 */
	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
			(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
			pscsi_req_done);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
916
917static void pscsi_free_task(struct se_task *task)
918{
919 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
920 struct se_cmd *cmd = task->task_se_cmd;
921
922 /*
923 * Release the extended CDB allocation from pscsi_alloc_task()
924 * if one exists.
925 */
926 if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
927 kfree(pt->pscsi_cdb);
928 /*
929 * We do not release the bio(s) here associated with this task, as
930 * this is handled by bio_put() and pscsi_bi_endio().
931 */
932 kfree(pt);
933}
934
/* configfs control-string tokens parsed by pscsi_set_configfs_dev_params() */
enum {
	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
	Opt_scsi_lun_id, Opt_err
};

/* match_token() table mapping "name=%d" option strings to the tokens above */
static match_table_t tokens = {
	{Opt_scsi_host_id, "scsi_host_id=%d"},
	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
	{Opt_scsi_target_id, "scsi_target_id=%d"},
	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
	{Opt_err, NULL}
};
947
/*
 * Parse the comma-separated configfs control string (scsi_host_id=,
 * scsi_channel_id=, scsi_target_id=, scsi_lun_id=) into the
 * pscsi_dev_virt, setting the matching PDF_HAS_* flag for each value
 * seen.  Returns @count on success or a negative errno.
 *
 * NOTE(review): match_int() return values are not checked, so a
 * malformed integer silently leaves the corresponding id field with
 * whatever match_int() produced — confirm this is acceptable for
 * configfs input.
 */
static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_scsi_host_id:
			/* scsi_host_id= conflicts with legacy HBA passthrough mode */
			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
				printk(KERN_ERR "PSCSI[%d]: Unable to accept"
					" scsi_host_id while phv_mode =="
					" PHV_LLD_SCSI_HOST_NO\n",
					phv->phv_host_id);
				ret = -EINVAL;
				goto out;
			}
			match_int(args, &arg);
			pdv->pdv_host_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
			break;
		case Opt_scsi_channel_id:
			match_int(args, &arg);
			pdv->pdv_channel_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_channel_id);
			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
			break;
		case Opt_scsi_target_id:
			match_int(args, &arg);
			pdv->pdv_target_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_target_id);
			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
			break;
		case Opt_scsi_lun_id:
			match_int(args, &arg);
			pdv->pdv_lun_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
			pdv->pdv_flags |= PDF_HAS_LUN_ID;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
1018
1019static ssize_t pscsi_check_configfs_dev_params(
1020 struct se_hba *hba,
1021 struct se_subsystem_dev *se_dev)
1022{
1023 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
1024
1025 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
1026 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
1027 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
1028 printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
1029 " scsi_lun_id= parameters\n");
1030 return -1;
1031 }
1032
1033 return 0;
1034}
1035
1036static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
1037 struct se_subsystem_dev *se_dev,
1038 char *b)
1039{
1040 struct pscsi_hba_virt *phv = hba->hba_ptr;
1041 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
1042 struct scsi_device *sd = pdv->pdv_sd;
1043 unsigned char host_id[16];
1044 ssize_t bl;
1045 int i;
1046
1047 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
1048 snprintf(host_id, 16, "%d", pdv->pdv_host_id);
1049 else
1050 snprintf(host_id, 16, "PHBA Mode");
1051
1052 bl = sprintf(b, "SCSI Device Bus Location:"
1053 " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
1054 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
1055 host_id);
1056
1057 if (sd) {
1058 bl += sprintf(b + bl, " ");
1059 bl += sprintf(b + bl, "Vendor: ");
1060 for (i = 0; i < 8; i++) {
1061 if (ISPRINT(sd->vendor[i])) /* printable character? */
1062 bl += sprintf(b + bl, "%c", sd->vendor[i]);
1063 else
1064 bl += sprintf(b + bl, " ");
1065 }
1066 bl += sprintf(b + bl, " Model: ");
1067 for (i = 0; i < 16; i++) {
1068 if (ISPRINT(sd->model[i])) /* printable character ? */
1069 bl += sprintf(b + bl, "%c", sd->model[i]);
1070 else
1071 bl += sprintf(b + bl, " ");
1072 }
1073 bl += sprintf(b + bl, " Rev: ");
1074 for (i = 0; i < 4; i++) {
1075 if (ISPRINT(sd->rev[i])) /* printable character ? */
1076 bl += sprintf(b + bl, "%c", sd->rev[i]);
1077 else
1078 bl += sprintf(b + bl, " ");
1079 }
1080 bl += sprintf(b + bl, "\n");
1081 }
1082 return bl;
1083}
1084
/* bio completion: drop the reference taken at allocation so the bio is freed. */
static void pscsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}
1089
1090static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
1091{
1092 struct bio *bio;
1093 /*
1094 * Use bio_malloc() following the comment in for bio -> struct request
1095 * in block/blk-core.c:blk_make_request()
1096 */
1097 bio = bio_kmalloc(GFP_KERNEL, sg_num);
1098 if (!(bio)) {
1099 printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
1100 return NULL;
1101 }
1102 bio->bi_end_io = pscsi_bi_endio;
1103
1104 return bio;
1105}
1106
/* Compile-time debug switch: flip "#if 0" to "#if 1" to enable the
 * DEBUG_PSCSI() printk tracing used throughout the mapping code below. */
#if 0
#define DEBUG_PSCSI(x...) printk(x)
#else
#define DEBUG_PSCSI(x...)
#endif
1112
1113static int __pscsi_map_task_SG(
1114 struct se_task *task,
1115 struct scatterlist *task_sg,
1116 u32 task_sg_num,
1117 int bidi_read)
1118{
1119 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1120 struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
1121 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
1122 struct page *page;
1123 struct scatterlist *sg;
1124 u32 data_len = task->task_size, i, len, bytes, off;
1125 int nr_pages = (task->task_size + task_sg[0].offset +
1126 PAGE_SIZE - 1) >> PAGE_SHIFT;
1127 int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
1128 int rw = (task->task_data_direction == DMA_TO_DEVICE);
1129
1130 if (!task->task_size)
1131 return 0;
1132 /*
1133 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
1134 * the bio_vec maplist from TC< struct se_mem -> task->task_sg ->
1135 * struct scatterlist memory. The struct se_task->task_sg[] currently needs
1136 * to be attached to struct bios for submission to Linux/SCSI using
1137 * struct request to struct scsi_device->request_queue.
1138 *
1139 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
1140 * is ported to upstream SCSI passthrough functionality that accepts
1141 * struct scatterlist->page_link or struct page as a paraemeter.
1142 */
1143 DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
1144
1145 for_each_sg(task_sg, sg, task_sg_num, i) {
1146 page = sg_page(sg);
1147 off = sg->offset;
1148 len = sg->length;
1149
1150 DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
1151 page, len, off);
1152
1153 while (len > 0 && data_len > 0) {
1154 bytes = min_t(unsigned int, len, PAGE_SIZE - off);
1155 bytes = min(bytes, data_len);
1156
1157 if (!(bio)) {
1158 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
1159 nr_pages -= nr_vecs;
1160 /*
1161 * Calls bio_kmalloc() and sets bio->bi_end_io()
1162 */
1163 bio = pscsi_get_bio(pdv, nr_vecs);
1164 if (!(bio))
1165 goto fail;
1166
1167 if (rw)
1168 bio->bi_rw |= REQ_WRITE;
1169
1170 DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
1171 " dir: %s nr_vecs: %d\n", bio,
1172 (rw) ? "rw" : "r", nr_vecs);
1173 /*
1174 * Set *hbio pointer to handle the case:
1175 * nr_pages > BIO_MAX_PAGES, where additional
1176 * bios need to be added to complete a given
1177 * struct se_task
1178 */
1179 if (!hbio)
1180 hbio = tbio = bio;
1181 else
1182 tbio = tbio->bi_next = bio;
1183 }
1184
1185 DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
1186 " bio: %p page: %p len: %d off: %d\n", i, bio,
1187 page, len, off);
1188
1189 rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
1190 bio, page, bytes, off);
1191 if (rc != bytes)
1192 goto fail;
1193
1194 DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
1195 bio->bi_vcnt, nr_vecs);
1196
1197 if (bio->bi_vcnt > nr_vecs) {
1198 DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
1199 " %d i: %d bio: %p, allocating another"
1200 " bio\n", bio->bi_vcnt, i, bio);
1201 /*
1202 * Clear the pointer so that another bio will
1203 * be allocated with pscsi_get_bio() above, the
1204 * current bio has already been set *tbio and
1205 * bio->bi_next.
1206 */
1207 bio = NULL;
1208 }
1209
1210 page++;
1211 len -= bytes;
1212 data_len -= bytes;
1213 off = 0;
1214 }
1215 }
1216 /*
1217 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
1218 * primary SCSI WRITE poayload mapped for struct se_task->task_sg[]
1219 */
1220 if (!(bidi_read)) {
1221 /*
1222 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
1223 * allocate the pSCSI task a struct request.
1224 */
1225 pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
1226 hbio, GFP_KERNEL);
1227 if (!(pt->pscsi_req)) {
1228 printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
1229 goto fail;
1230 }
1231 /*
1232 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
1233 * and setup rq callback, CDB and sense.
1234 */
1235 pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
1236
1237 return task->task_sg_num;
1238 }
1239 /*
1240 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
1241 * SCSI READ paylaod mapped for struct se_task->task_sg_bidi[]
1242 */
1243 pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
1244 hbio, GFP_KERNEL);
1245 if (!(pt->pscsi_req->next_rq)) {
1246 printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
1247 goto fail;
1248 }
1249 pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
1250
1251 return task->task_sg_num;
1252fail:
1253 while (hbio) {
1254 bio = hbio;
1255 hbio = hbio->bi_next;
1256 bio->bi_next = NULL;
1257 bio_endio(bio, 0);
1258 }
1259 return ret;
1260}
1261
1262static int pscsi_map_task_SG(struct se_task *task)
1263{
1264 int ret;
1265
1266 /*
1267 * Setup the main struct request for the task->task_sg[] payload
1268 */
1269
1270 ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
1271 if (ret >= 0 && task->task_sg_bidi) {
1272 /*
1273 * If present, set up the extra BIDI-COMMAND SCSI READ
1274 * struct request and payload.
1275 */
1276 ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
1277 task->task_sg_num, 1);
1278 }
1279
1280 if (ret < 0)
1281 return PYX_TRANSPORT_LU_COMM_FAILURE;
1282 return 0;
1283}
1284
1285/* pscsi_map_task_non_SG():
1286 *
1287 *
1288 */
1289static int pscsi_map_task_non_SG(struct se_task *task)
1290{
1291 struct se_cmd *cmd = TASK_CMD(task);
1292 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1293 struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
1294 int ret = 0;
1295
1296 if (pscsi_blk_get_request(task) < 0)
1297 return PYX_TRANSPORT_LU_COMM_FAILURE;
1298
1299 if (!task->task_size)
1300 return 0;
1301
1302 ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
1303 pt->pscsi_req, T_TASK(cmd)->t_task_buf,
1304 task->task_size, GFP_KERNEL);
1305 if (ret < 0) {
1306 printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
1307 return PYX_TRANSPORT_LU_COMM_FAILURE;
1308 }
1309 return 0;
1310}
1311
/* Allocate the backing struct request for a task carrying no data payload */
static int pscsi_CDB_none(struct se_task *task)
{
	return pscsi_blk_get_request(task);
}
1316
/* pscsi_get_cdb():
 *
 * Return the active CDB buffer pointer for this pscsi task.
 */
static unsigned char *pscsi_get_cdb(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);

	return pt->pscsi_cdb;
}
1327
1328/* pscsi_get_sense_buffer():
1329 *
1330 *
1331 */
1332static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
1333{
1334 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1335
1336 return (unsigned char *)&pt->pscsi_sense[0];
1337}
1338
1339/* pscsi_get_device_rev():
1340 *
1341 *
1342 */
1343static u32 pscsi_get_device_rev(struct se_device *dev)
1344{
1345 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1346 struct scsi_device *sd = pdv->pdv_sd;
1347
1348 return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
1349}
1350
1351/* pscsi_get_device_type():
1352 *
1353 *
1354 */
1355static u32 pscsi_get_device_type(struct se_device *dev)
1356{
1357 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1358 struct scsi_device *sd = pdv->pdv_sd;
1359
1360 return sd->type;
1361}
1362
1363static sector_t pscsi_get_blocks(struct se_device *dev)
1364{
1365 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1366
1367 if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
1368 return pdv->pdv_bd->bd_part->nr_sects;
1369
1370 dump_stack();
1371 return 0;
1372}
1373
/* pscsi_process_SAM_status():
 *
 * Translate the SCSI midlayer result in pt->pscsi_result into the
 * generic target core task status and complete the task.
 */
static inline void pscsi_process_SAM_status(
	struct se_task *task,
	struct pscsi_plugin_task *pt)
{
	/* status_byte() yields the SAM status shifted right by one; shift
	 * left to restore the SAM-defined encoding (e.g. CHECK_CONDITION) */
	task->task_scsi_status = status_byte(pt->pscsi_result);
	if ((task->task_scsi_status)) {
		task->task_scsi_status <<= 1;
		printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
			pt->pscsi_result);
	}

	switch (host_byte(pt->pscsi_result)) {
	case DID_OK:
		/* Success at the host level: task is good iff no SAM status
		 * byte was set above */
		transport_complete_task(task, (!task->task_scsi_status));
		break;
	default:
		/* Any host-level failure is reported upward as
		 * CHECK_CONDITION / unknown opcode */
		printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
			pt->pscsi_result);
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		TASK_CMD(task)->transport_error_status =
					PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		transport_complete_task(task, 0);
		break;
	}

	return;
}
1408
/*
 * Block layer end_io callback for pSCSI requests: record the midlayer
 * result and residual, complete the se_task, then drop the block layer
 * references on the request(s).
 */
static void pscsi_req_done(struct request *req, int uptodate)
{
	struct se_task *task = req->end_io_data;
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);

	pt->pscsi_result = req->errors;
	pt->pscsi_resid = req->resid_len;

	pscsi_process_SAM_status(task, pt);
	/*
	 * Release BIDI-READ if present
	 */
	if (req->next_rq != NULL)
		__blk_put_request(req->q, req->next_rq);

	__blk_put_request(req->q, req);
	pt->pscsi_req = NULL;
}
1427
/*
 * se_subsystem_api template wiring the pSCSI passthrough callbacks into
 * the generic target core (registered in pscsi_module_init()).
 */
static struct se_subsystem_api pscsi_template = {
	.name			= "pscsi",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV,
	.cdb_none		= pscsi_CDB_none,
	.map_task_non_SG	= pscsi_map_task_non_SG,
	.map_task_SG		= pscsi_map_task_SG,
	.attach_hba		= pscsi_attach_hba,
	.detach_hba		= pscsi_detach_hba,
	.pmode_enable_hba	= pscsi_pmode_enable_hba,
	.allocate_virtdevice	= pscsi_allocate_virtdevice,
	.create_virtdevice	= pscsi_create_virtdevice,
	.free_device		= pscsi_free_device,
	.transport_complete	= pscsi_transport_complete,
	.alloc_task		= pscsi_alloc_task,
	.do_task		= pscsi_do_task,
	.free_task		= pscsi_free_task,
	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
	.get_cdb		= pscsi_get_cdb,
	.get_sense_buffer	= pscsi_get_sense_buffer,
	.get_device_rev		= pscsi_get_device_rev,
	.get_device_type	= pscsi_get_device_type,
	.get_blocks		= pscsi_get_blocks,
};
1454
/* Register the pSCSI subsystem plugin with the target core on module load */
static int __init pscsi_module_init(void)
{
	return transport_subsystem_register(&pscsi_template);
}
1459
/* Unregister the pSCSI subsystem plugin on module unload */
static void pscsi_module_exit(void)
{
	transport_subsystem_release(&pscsi_template);
}
1464
/* Module metadata and init/exit entry points */
MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(pscsi_module_init);
module_exit(pscsi_module_exit);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
new file mode 100644
index 000000000000..a4cd5d352c3a
--- /dev/null
+++ b/drivers/target/target_core_pscsi.h
@@ -0,0 +1,65 @@
1#ifndef TARGET_CORE_PSCSI_H
2#define TARGET_CORE_PSCSI_H
3
4#define PSCSI_VERSION "v4.0"
5#define PSCSI_VIRTUAL_HBA_DEPTH 2048
6
7/* used in pscsi_find_alloc_len() */
8#ifndef INQUIRY_DATA_SIZE
9#define INQUIRY_DATA_SIZE 0x24
10#endif
11
12/* used in pscsi_add_device_to_list() */
13#define PSCSI_DEFAULT_QUEUEDEPTH 1
14
15#define PS_RETRY 5
16#define PS_TIMEOUT_DISK (15*HZ)
17#define PS_TIMEOUT_OTHER (500*HZ)
18
19#include <linux/device.h>
20#include <scsi/scsi_driver.h>
21#include <scsi/scsi_device.h>
22#include <linux/kref.h>
23#include <linux/kobject.h>
24
/* Per-task pSCSI state wrapped around the generic struct se_task */
struct pscsi_plugin_task {
	struct se_task pscsi_task;	/* embedded generic task */
	unsigned char *pscsi_cdb;	/* active CDB pointer -- presumably points
					 * at __pscsi_cdb or an extended buffer;
					 * confirm against the alloc path */
	unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];	/* inline CDB storage */
	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];	/* sense data buffer */
	int pscsi_direction;	/* NOTE(review): presumably a DMA data direction
				 * value -- not set in the visible code */
	int pscsi_result;	/* SCSI midlayer result (req->errors) */
	u32 pscsi_resid;	/* residual byte count (req->resid_len) */
	struct request *pscsi_req;	/* block layer request backing this task */
} ____cacheline_aligned;
35
/* pdv_flags bits: which identifiers / VPD data have been configured */
#define PDF_HAS_CHANNEL_ID	0x01
#define PDF_HAS_TARGET_ID	0x02
#define PDF_HAS_LUN_ID		0x04
#define PDF_HAS_VPD_UNIT_SERIAL 0x08
#define PDF_HAS_VPD_DEV_IDENT	0x10
#define PDF_HAS_VIRT_HOST_ID	0x20

/* Per-device pSCSI state: the H/C/T/L address of the passthrough device
 * plus the attached struct scsi_device and optional block_device */
struct pscsi_dev_virt {
	int pdv_flags;			/* PDF_HAS_* bits above */
	int pdv_host_id;		/* host id (virtual-host mode) */
	int pdv_channel_id;		/* SCSI channel */
	int pdv_target_id;		/* SCSI target */
	int pdv_lun_id;			/* SCSI LUN */
	struct block_device *pdv_bd;	/* backing block device, may be NULL */
	struct scsi_device *pdv_sd;	/* attached SCSI midlayer device */
	struct se_hba *pdv_se_hba;	/* owning generic HBA */
} ____cacheline_aligned;
53
/*
 * HBA addressing mode.  NOTE(review): PHV_VIRUTAL_HOST_ID is misspelled
 * ("VIRUTAL"), but the name is referenced from target_core_pscsi.c, so
 * renaming it would break compilation of existing users.
 */
typedef enum phv_modes {
	PHV_VIRUTAL_HOST_ID,	/* virtual host id assigned via configfs */
	PHV_LLD_SCSI_HOST_NO	/* reuse an existing LLD Scsi_Host number */
} phv_modes_t;
58
/* Per-HBA pSCSI state hung off struct se_hba->hba_ptr */
struct pscsi_hba_virt {
	int phv_host_id;		/* host id of this virtual HBA */
	phv_modes_t phv_mode;		/* addressing mode, see phv_modes above */
	struct Scsi_Host *phv_lld_host;	/* LLD host -- presumably only valid in
					 * PHV_LLD_SCSI_HOST_NO mode; confirm */
} ____cacheline_aligned;
64
65#endif /*** TARGET_CORE_PSCSI_H ***/
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 000000000000..979aebf20019
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1091 @@
1/*******************************************************************************
2 * Filename: target_core_rd.c
3 *
4 * This file contains the Storage Engine <-> Ramdisk transport
5 * specific functions.
6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 *
12 * Nicholas A. Bellinger <nab@kernel.org>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 *
28 ******************************************************************************/
29
30#include <linux/version.h>
31#include <linux/string.h>
32#include <linux/parser.h>
33#include <linux/timer.h>
34#include <linux/blkdev.h>
35#include <linux/slab.h>
36#include <linux/spinlock.h>
37#include <linux/smp_lock.h>
38#include <scsi/scsi.h>
39#include <scsi/scsi_host.h>
40
41#include <target/target_core_base.h>
42#include <target/target_core_device.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45
46#include "target_core_rd.h"
47
48static struct se_subsystem_api rd_dr_template;
49static struct se_subsystem_api rd_mcp_template;
50
51/* #define DEBUG_RAMDISK_MCP */
52/* #define DEBUG_RAMDISK_DR */
53
54/* rd_attach_hba(): (Part of se_subsystem_api_t template)
55 *
56 *
57 */
58static int rd_attach_hba(struct se_hba *hba, u32 host_id)
59{
60 struct rd_host *rd_host;
61
62 rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
63 if (!(rd_host)) {
64 printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
65 return -ENOMEM;
66 }
67
68 rd_host->rd_host_id = host_id;
69
70 atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
71 atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
72 hba->hba_ptr = (void *) rd_host;
73
74 printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
75 " Generic Target Core Stack %s\n", hba->hba_id,
76 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
77 printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
78 " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
79 rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
80 RD_MAX_SECTORS);
81
82 return 0;
83}
84
85static void rd_detach_hba(struct se_hba *hba)
86{
87 struct rd_host *rd_host = hba->hba_ptr;
88
89 printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
90 " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
91
92 kfree(rd_host);
93 hba->hba_ptr = NULL;
94}
95
96/* rd_release_device_space():
97 *
98 *
99 */
100static void rd_release_device_space(struct rd_dev *rd_dev)
101{
102 u32 i, j, page_count = 0, sg_per_table;
103 struct rd_dev_sg_table *sg_table;
104 struct page *pg;
105 struct scatterlist *sg;
106
107 if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
108 return;
109
110 sg_table = rd_dev->sg_table_array;
111
112 for (i = 0; i < rd_dev->sg_table_count; i++) {
113 sg = sg_table[i].sg_table;
114 sg_per_table = sg_table[i].rd_sg_count;
115
116 for (j = 0; j < sg_per_table; j++) {
117 pg = sg_page(&sg[j]);
118 if ((pg)) {
119 __free_page(pg);
120 page_count++;
121 }
122 }
123
124 kfree(sg);
125 }
126
127 printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
128 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
129 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
130 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
131
132 kfree(sg_table);
133 rd_dev->sg_table_array = NULL;
134 rd_dev->sg_table_count = 0;
135}
136
137
/* rd_build_device_space():
 *
 * Allocate rd_page_count backing pages for the ramdisk, grouped into
 * scatterlist tables of at most RD_MAX_ALLOCATION_SIZE bytes each.
 * Returns 0 on success, -1 on failure.  On a mid-build failure the
 * partially filled tables are left in place; the caller (see
 * rd_create_virtdevice()'s fail path) unwinds via
 * rd_release_device_space().
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -1;
	}
	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!(sg_table)) {
		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -1;
	}

	/* Publish the (zeroed) table array up front so a failure below can
	 * still be unwound by rd_release_device_space() */
	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!(sg)) {
			printk(KERN_ERR "Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -1;
		}

		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);

		/* Record the inclusive page span covered by this table */
		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!(pg)) {
				printk(KERN_ERR "Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -1;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}
212
213static void *rd_allocate_virtdevice(
214 struct se_hba *hba,
215 const char *name,
216 int rd_direct)
217{
218 struct rd_dev *rd_dev;
219 struct rd_host *rd_host = hba->hba_ptr;
220
221 rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
222 if (!(rd_dev)) {
223 printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
224 return NULL;
225 }
226
227 rd_dev->rd_host = rd_host;
228 rd_dev->rd_direct = rd_direct;
229
230 return rd_dev;
231}
232
/* se_subsystem_api hook: allocate a DIRECT-mode (zero-copy) ramdisk */
static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 1);
}
237
/* se_subsystem_api hook: allocate a MEMCPY-mode ramdisk */
static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 0);
}
242
/* rd_create_virtdevice():
 *
 * Build the ramdisk page space and register the new device with the
 * generic target core.  @rd_direct selects DIRECT (DR) vs MEMCPY (MCP)
 * mode, which also selects the subsystem template used.  Returns the
 * new se_device or NULL on failure (page space is released on the
 * failure path).
 */
static struct se_device *rd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p,
	int rd_direct)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	if (rd_build_device_space(rd_dev) < 0)
		goto fail;

	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
	/* NOTE(review): rev holds only 3 chars + NUL, so the version string
	 * is truncated here -- presumably intentional; confirm against the
	 * RD_DR_VERSION/RD_MCP_VERSION definitions */
	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
						RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			(rd_dev->rd_direct) ? &rd_dr_template :
			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
			&dev_limits, prod, rev);
	if (!(dev))
		goto fail;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
	rd_dev->rd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return NULL;
}
298
/* se_subsystem_api hook: create a DIRECT-mode ramdisk device */
static struct se_device *rd_DIRECT_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 1);
}
306
/* se_subsystem_api hook: create a MEMCPY-mode ramdisk device */
static struct se_device *rd_MEMCPY_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 0);
}
314
/* rd_free_device(): (Part of se_subsystem_api_t template)
 *
 * Release all ramdisk backing pages, then the struct rd_dev itself.
 */
static void rd_free_device(void *p)
{
	struct rd_dev *dev = p;

	rd_release_device_space(dev);
	kfree(dev);
}
326
/* Convert an embedded struct se_task back to its containing rd_request */
static inline struct rd_request *RD_REQ(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}
331
332static struct se_task *
333rd_alloc_task(struct se_cmd *cmd)
334{
335 struct rd_request *rd_req;
336
337 rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
338 if (!rd_req) {
339 printk(KERN_ERR "Unable to allocate struct rd_request\n");
340 return NULL;
341 }
342 rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
343
344 return &rd_req->rd_task;
345}
346
347/* rd_get_sg_table():
348 *
349 *
350 */
351static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
352{
353 u32 i;
354 struct rd_dev_sg_table *sg_table;
355
356 for (i = 0; i < rd_dev->sg_table_count; i++) {
357 sg_table = &rd_dev->sg_table_array[i];
358 if ((sg_table->page_start_offset <= page) &&
359 (sg_table->page_end_offset >= page))
360 return sg_table;
361 }
362
363 printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
364 page);
365
366 return NULL;
367}
368
/* rd_MEMCPY_read():
 *
 * Copy READ data from the ramdisk backing pages (sg_s, addressed by
 * req->rd_page / req->rd_offset) into the task scatterlist (sg_d),
 * walking both lists segment by segment and crossing into the next sg
 * table once the page index passes table->page_end_offset.  Returns 0
 * on success, -1 if a page table lookup fails.
 */
static int rd_MEMCPY_read(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = task->task_sg;
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	src_offset = rd_offset;

	while (req->rd_size) {
		/* Each step copies the smaller of the remaining destination
		 * segment and remaining source segment, capped by rd_size */
		if ((sg_d[i].length - dst_offset) <
		    (sg_s[j].length - src_offset)) {
			length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
				sg_s[j].length);
			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i++]) + dst_offset;
			if (!dst)
				BUG();

			src = sg_virt(&sg_s[j]) + src_offset;
			if (!src)
				BUG();

			dst_offset = 0;
			src_offset = length;
			page_end = 0;
		} else {
			length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset,
				j, sg_s[j].length);
			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i]) + dst_offset;
			if (!dst)
				BUG();

			if (sg_d[i].length == length) {
				i++;
				dst_offset = 0;
			} else
				dst_offset = length;

			src = sg_virt(&sg_s[j++]) + src_offset;
			if (!src)
				BUG();

			src_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!(req->rd_size))
			return 0;

		/* Only advance rd_page once the source segment is consumed */
		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

	return 0;
}
491
/* rd_MEMCPY_write():
 *
 * Mirror of rd_MEMCPY_read(): copy WRITE data from the task scatterlist
 * (sg_s) into the ramdisk backing pages (sg_d, addressed by
 * req->rd_page / req->rd_offset), crossing into the next sg table once
 * the page index passes table->page_end_offset.  Returns 0 on success,
 * -1 if a page table lookup fails.
 */
static int rd_MEMCPY_write(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
	sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	dst_offset = rd_offset;

	while (req->rd_size) {
		/* Each step copies the smaller of the remaining source
		 * segment and remaining destination segment, capped by rd_size */
		if ((sg_s[i].length - src_offset) <
		    (sg_d[j].length - dst_offset)) {
			length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i++]) + src_offset;
			if (!src)
				BUG();

			dst = sg_virt(&sg_d[j]) + dst_offset;
			if (!dst)
				BUG();

			src_offset = 0;
			dst_offset = length;
			page_end = 0;
		} else {
			length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i]) + src_offset;
			if (!src)
				BUG();

			if (sg_s[i].length == length) {
				i++;
				src_offset = 0;
			} else
				src_offset = length;

			dst = sg_virt(&sg_d[j++]) + dst_offset;
			if (!dst)
				BUG();

			dst_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!(req->rd_size))
			return 0;

		/* Only advance rd_page once the destination segment is full */
		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_d = &table->sg_table[j = 0];
	}

	return 0;
}
614
/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 *
 * Convert the task LBA into a backing page index + byte offset, perform
 * the memcpy in the required direction, and complete the task
 * synchronously on success.
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	struct rd_request *req = RD_REQ(task);
	unsigned long long lba;
	int ret;

	/* page index = byte offset of the LBA divided by PAGE_SIZE */
	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
	lba = task->task_lba;
	/* do_div() divides lba in place and yields the remainder: the block
	 * index within the page, scaled back up to a byte offset */
	req->rd_offset = (do_div(lba,
			  (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
			   DEV_ATTRIB(dev)->block_size;
	req->rd_size = task->task_size;

	if (task->task_data_direction == DMA_FROM_DEVICE)
		ret = rd_MEMCPY_read(req);
	else
		ret = rd_MEMCPY_write(req);

	if (ret != 0)
		return ret;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
646
/* rd_DIRECT_with_offset():
 *
 * Build a list of struct se_mem entries that reference the ramdisk's
 * backing pages directly (zero-copy), starting req->rd_offset bytes
 * into the first page.  Each entry added bumps *se_mem_cnt, and the
 * total is accounted in T_TASK()->t_tasks_se_num on success.  Returns
 * 0 on success, -1 on allocation or page table lookup failure.
 */
static int rd_DIRECT_with_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 j = 0, set_offset = 1;
	u32 get_next_table = 0, offset_length, table_sg_end;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		if (set_offset) {
			/* Leading part of a page: honour the rd_offset skip */
			offset_length = sg_s[j].length - req->rd_offset;
			if (offset_length > req->rd_size)
				offset_length = req->rd_size;

			se_mem->se_page = sg_page(&sg_s[j++]);
			se_mem->se_off = req->rd_offset;
			se_mem->se_len = offset_length;

			set_offset = 0;
			get_next_table = (j > table_sg_end);
			goto check_eot;
		}

		/* Trailing part: up to rd_offset bytes at the start of the
		 * next page (se_off stays 0 from the zalloc) */
		offset_length = (req->rd_size < req->rd_offset) ?
			req->rd_size : req->rd_offset;

		se_mem->se_page = sg_page(&sg_s[j]);
		se_mem->se_len = offset_length;

		set_offset = 1;

check_eot:
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
			req->rd_page, req->rd_size, offset_length, j, se_mem,
			se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= offset_length;
		if (!(req->rd_size))
			goto out;

		if (!set_offset && !get_next_table)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
#endif
	return 0;
}
750
751/* rd_DIRECT_without_offset():
752 *
753 *
754 */
755static int rd_DIRECT_without_offset(
756 struct se_task *task,
757 struct list_head *se_mem_list,
758 u32 *se_mem_cnt,
759 u32 *task_offset)
760{
761 struct rd_request *req = RD_REQ(task);
762 struct rd_dev *dev = req->rd_dev;
763 struct rd_dev_sg_table *table;
764 struct se_mem *se_mem;
765 struct scatterlist *sg_s;
766 u32 length, j = 0;
767
768 table = rd_get_sg_table(dev, req->rd_page);
769 if (!(table))
770 return -1;
771
772 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
773#ifdef DEBUG_RAMDISK_DR
774 printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
775 (task->task_data_direction == DMA_TO_DEVICE) ?
776 "Write" : "Read",
777 task->task_lba, req->rd_size, req->rd_page);
778#endif
779 while (req->rd_size) {
780 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
781 if (!(se_mem)) {
782 printk(KERN_ERR "Unable to allocate struct se_mem\n");
783 return -1;
784 }
785 INIT_LIST_HEAD(&se_mem->se_list);
786
787 length = (req->rd_size < sg_s[j].length) ?
788 req->rd_size : sg_s[j].length;
789
790 se_mem->se_page = sg_page(&sg_s[j++]);
791 se_mem->se_len = length;
792
793#ifdef DEBUG_RAMDISK_DR
794 printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
795 " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
796 req->rd_size, j, se_mem, se_mem->se_page,
797 se_mem->se_off, se_mem->se_len);
798#endif
799 list_add_tail(&se_mem->se_list, se_mem_list);
800 (*se_mem_cnt)++;
801
802 req->rd_size -= length;
803 if (!(req->rd_size))
804 goto out;
805
806 if (++req->rd_page <= table->page_end_offset) {
807#ifdef DEBUG_RAMDISK_DR
808 printk("page: %u in same page table\n",
809 req->rd_page);
810#endif
811 continue;
812 }
813#ifdef DEBUG_RAMDISK_DR
814 printk(KERN_INFO "getting new page table for page: %u\n",
815 req->rd_page);
816#endif
817 table = rd_get_sg_table(dev, req->rd_page);
818 if (!(table))
819 return -1;
820
821 sg_s = &table->sg_table[j = 0];
822 }
823
824out:
825 T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
826#ifdef DEBUG_RAMDISK_DR
827 printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
828 *se_mem_cnt);
829#endif
830 return 0;
831}
832
/* rd_DIRECT_do_se_mem_map():
 *
 * se_subsystem_api->do_se_mem_map entry point for RAMDISK_DR.  Converts
 * task->task_lba into a starting ramdisk page plus an intra-page byte
 * offset, then builds the direct se_mem mapping via
 * rd_DIRECT_with_offset() or rd_DIRECT_without_offset().  When the
 * fabric enables task_sg_chaining, additionally sets up
 * task->task_sg[] for transport_do_task_sg_chain().
 *
 * Returns 0 on success or a negative value on failure.
 */
static int rd_DIRECT_do_se_mem_map(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct rd_request *req = RD_REQ(task);
	u32 task_offset = *task_offset_in;
	unsigned long long lba;
	int ret;

	/* First backing page touched by this task's LBA */
	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
			PAGE_SIZE);
	lba = task->task_lba;
	/* Byte offset of the LBA within that page; do_div() is needed for
	 * 64-bit division on 32-bit architectures. */
	req->rd_offset = (do_div(lba,
			 (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
			  DEV_ATTRIB(task->se_dev)->block_size;
	req->rd_size = task->task_size;

	if (req->rd_offset)
		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);
	else
		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);

	if (ret < 0)
		return ret;

	if (CMD_TFO(cmd)->task_sg_chaining == 0)
		return 0;
	/*
	 * Currently prevent writers from multiple HW fabrics doing
	 * pci_map_sg() to RD_DR's internal scatterlist memory.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
				" RAMDISK_DR with task_sg_chaining=1\n");
		return -1;
	}
	/*
	 * Special case for if task_sg_chaining is enabled, then
	 * we setup struct se_task->task_sg[], as it will be used by
	 * transport_do_task_sg_chain() for creating chained SGLs
	 * across multiple struct se_task->task_sg[].
	 */
	if (!(transport_calc_sg_num(task,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			task_offset)))
		return -1;

	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			out_se_mem, se_mem_cnt, task_offset_in);
}
898
/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
 *
 * No data copy is required for RAMDISK_DR: the se_mem list already
 * points at the ramdisk's backing pages, so just complete the task
 * with GOOD status.
 */
static int rd_DIRECT_do_task(struct se_task *task)
{
	/*
	 * At this point the locally allocated RD tables have been mapped
	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
	 */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
914
/* rd_free_task(): (Part of se_subsystem_api_t template)
 *
 * Release the struct rd_request embedded allocation for this task.
 */
static void rd_free_task(struct se_task *task)
{
	kfree(RD_REQ(task));
}
923
/* Option tokens recognized by rd_set_configfs_dev_params() */
enum {
	Opt_rd_pages, Opt_err
};

/* match_token() table mapping configfs parameter strings to tokens */
static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};
932
933static ssize_t rd_set_configfs_dev_params(
934 struct se_hba *hba,
935 struct se_subsystem_dev *se_dev,
936 const char *page,
937 ssize_t count)
938{
939 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
940 char *orig, *ptr, *opts;
941 substring_t args[MAX_OPT_ARGS];
942 int ret = 0, arg, token;
943
944 opts = kstrdup(page, GFP_KERNEL);
945 if (!opts)
946 return -ENOMEM;
947
948 orig = opts;
949
950 while ((ptr = strsep(&opts, ",")) != NULL) {
951 if (!*ptr)
952 continue;
953
954 token = match_token(ptr, tokens, args);
955 switch (token) {
956 case Opt_rd_pages:
957 match_int(args, &arg);
958 rd_dev->rd_page_count = arg;
959 printk(KERN_INFO "RAMDISK: Referencing Page"
960 " Count: %u\n", rd_dev->rd_page_count);
961 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
962 break;
963 default:
964 break;
965 }
966 }
967
968 kfree(orig);
969 return (!ret) ? count : ret;
970}
971
972static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
973{
974 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
975
976 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
977 printk(KERN_INFO "Missing rd_pages= parameter\n");
978 return -1;
979 }
980
981 return 0;
982}
983
984static ssize_t rd_show_configfs_dev_params(
985 struct se_hba *hba,
986 struct se_subsystem_dev *se_dev,
987 char *b)
988{
989 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
990 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
991 rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
992 "rd_direct" : "rd_mcp");
993 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
994 " SG_table_count: %u\n", rd_dev->rd_page_count,
995 PAGE_SIZE, rd_dev->sg_table_count);
996 return bl;
997}
998
999/* rd_get_cdb(): (Part of se_subsystem_api_t template)
1000 *
1001 *
1002 */
1003static unsigned char *rd_get_cdb(struct se_task *task)
1004{
1005 struct rd_request *req = RD_REQ(task);
1006
1007 return req->rd_scsi_cdb;
1008}
1009
/* rd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 * Report the SCSI specification revision for the virtual ramdisk device.
 */
static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
1014
/* rd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 * Ramdisk devices always present themselves as direct-access disks.
 */
static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
1019
1020static sector_t rd_get_blocks(struct se_device *dev)
1021{
1022 struct rd_dev *rd_dev = dev->dev_ptr;
1023 unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
1024 DEV_ATTRIB(dev)->block_size) - 1;
1025
1026 return blocks_long;
1027}
1028
/*
 * se_subsystem_api template for RAMDISK_DR: maps struct se_mem entries
 * directly onto the ramdisk's backing pages via rd_DIRECT_do_se_mem_map(),
 * so rd_DIRECT_do_task() completes without any data copy.
 */
static struct se_subsystem_api rd_dr_template = {
	.name = "rd_dr",
	.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba = rd_attach_hba,
	.detach_hba = rd_detach_hba,
	.allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
	.create_virtdevice = rd_DIRECT_create_virtdevice,
	.free_device = rd_free_device,
	.alloc_task = rd_alloc_task,
	.do_task = rd_DIRECT_do_task,
	.free_task = rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb = rd_get_cdb,
	.get_device_rev = rd_get_device_rev,
	.get_device_type = rd_get_device_type,
	.get_blocks = rd_get_blocks,
	.do_se_mem_map = rd_DIRECT_do_se_mem_map,
};
1049
/*
 * se_subsystem_api template for RAMDISK_MCP: rd_MEMCPY_do_task() copies
 * data between the command buffer and the ramdisk pages, so no
 * do_se_mem_map callback is needed.
 */
static struct se_subsystem_api rd_mcp_template = {
	.name = "rd_mcp",
	.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba = rd_attach_hba,
	.detach_hba = rd_detach_hba,
	.allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
	.create_virtdevice = rd_MEMCPY_create_virtdevice,
	.free_device = rd_free_device,
	.alloc_task = rd_alloc_task,
	.do_task = rd_MEMCPY_do_task,
	.free_task = rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb = rd_get_cdb,
	.get_device_rev = rd_get_device_rev,
	.get_device_type = rd_get_device_type,
	.get_blocks = rd_get_blocks,
};
1069
1070int __init rd_module_init(void)
1071{
1072 int ret;
1073
1074 ret = transport_subsystem_register(&rd_dr_template);
1075 if (ret < 0)
1076 return ret;
1077
1078 ret = transport_subsystem_register(&rd_mcp_template);
1079 if (ret < 0) {
1080 transport_subsystem_release(&rd_dr_template);
1081 return ret;
1082 }
1083
1084 return 0;
1085}
1086
/*
 * rd_module_exit(): unregister both ramdisk subsystem templates on
 * module unload / target core shutdown.
 */
void rd_module_exit(void)
{
	transport_subsystem_release(&rd_dr_template);
	transport_subsystem_release(&rd_mcp_template);
}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
new file mode 100644
index 000000000000..13badfbaf9c0
--- /dev/null
+++ b/drivers/target/target_core_rd.h
@@ -0,0 +1,73 @@
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H

#define RD_HBA_VERSION "v4.0"
#define RD_DR_VERSION "4.0"
#define RD_MCP_VERSION "4.0"

/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE 65536
/* Maximum queuedepth for the Ramdisk HBA */
#define RD_HBA_QUEUE_DEPTH 256
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
#define RD_MAX_SECTORS 1024

/* se_mem allocation cache, defined in the target core */
extern struct kmem_cache *se_mem_cache;

/* Used in target_core_init_configfs() for virtual LUN 0 access */
int __init rd_module_init(void);
void rd_module_exit(void);

#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02

/* Per-task request state shared by RAMDISK_DR and RAMDISK_MCP */
struct rd_request {
	struct se_task rd_task;

	/* SCSI CDB from iSCSI Command PDU */
	unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
	/* Offset from start of page */
	u32 rd_offset;
	/* Starting page in Ramdisk for request */
	u32 rd_page;
	/* Total number of pages needed for request */
	u32 rd_page_count;
	/* Remaining request size in bytes; decremented as segments are
	 * mapped in target_core_rd.c.  (NOTE(review): original comment
	 * said "Scatterlist count", which does not match its usage.) */
	u32 rd_size;
	/* Ramdisk device */
	struct rd_dev *rd_dev;
} ____cacheline_aligned;

/* One scatterlist table covering a contiguous page range of the ramdisk */
struct rd_dev_sg_table {
	/* First ramdisk page index covered by sg_table */
	u32 page_start_offset;
	/* Last ramdisk page index covered by sg_table */
	u32 page_end_offset;
	u32 rd_sg_count;
	struct scatterlist *sg_table;
} ____cacheline_aligned;

#define RDF_HAS_PAGE_COUNT 0x01

struct rd_dev {
	/* Non-zero for RAMDISK_DR (direct mapped), zero for RAMDISK_MCP */
	int rd_direct;
	u32 rd_flags;
	/* Unique Ramdisk Device ID in Ramdisk HBA */
	u32 rd_dev_id;
	/* Total page count for ramdisk device */
	u32 rd_page_count;
	/* Number of SG tables in sg_table_array */
	u32 sg_table_count;
	u32 rd_queue_depth;
	/* Array of rd_dev_sg_table_t containing scatterlists */
	struct rd_dev_sg_table *sg_table_array;
	/* Ramdisk HBA device is connected to */
	struct rd_host *rd_host;
} ____cacheline_aligned;

struct rd_host {
	u32 rd_host_dev_id_count;
	u32 rd_host_id; /* Unique Ramdisk Host ID */
} ____cacheline_aligned;

#endif /* TARGET_CORE_RD_H */
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
new file mode 100644
index 000000000000..dc6fed037ab3
--- /dev/null
+++ b/drivers/target/target_core_scdb.c
@@ -0,0 +1,105 @@
1/*******************************************************************************
2 * Filename: target_core_scdb.c
3 *
4 * This file contains the generic target engine Split CDB related functions.
5 *
6 * Copyright (c) 2004-2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/net.h>
30#include <linux/string.h>
31#include <scsi/scsi.h>
32#include <asm/unaligned.h>
33
34#include <target/target_core_base.h>
35#include <target/target_core_transport.h>
36
37#include "target_core_scdb.h"
38
/* split_cdb_XX_6():
 *
 * 21-bit LBA w/ 8-bit SECTORS
 *
 * Encode @lba and @*sectors into the fixed fields of a 6-byte CDB.
 * NOTE(review): this assigns all of cdb[1], including the historical
 * LUN/high bits above the 5 LBA bits -- confirm callers rely on them
 * being cleared.
 */
void split_cdb_XX_6(
	unsigned long long lba,
	u32 *sectors,
	unsigned char *cdb)
{
	cdb[1] = (lba >> 16) & 0x1f;	/* LBA bits 20..16 */
	cdb[2] = (lba >> 8) & 0xff;
	cdb[3] = lba & 0xff;
	cdb[4] = *sectors & 0xff;
}
53
54/* split_cdb_XX_10():
55 *
56 * 32-bit LBA w/ 16-bit SECTORS
57 */
58void split_cdb_XX_10(
59 unsigned long long lba,
60 u32 *sectors,
61 unsigned char *cdb)
62{
63 put_unaligned_be32(lba, &cdb[2]);
64 put_unaligned_be16(*sectors, &cdb[7]);
65}
66
67/* split_cdb_XX_12():
68 *
69 * 32-bit LBA w/ 32-bit SECTORS
70 */
71void split_cdb_XX_12(
72 unsigned long long lba,
73 u32 *sectors,
74 unsigned char *cdb)
75{
76 put_unaligned_be32(lba, &cdb[2]);
77 put_unaligned_be32(*sectors, &cdb[6]);
78}
79
80/* split_cdb_XX_16():
81 *
82 * 64-bit LBA w/ 32-bit SECTORS
83 */
84void split_cdb_XX_16(
85 unsigned long long lba,
86 u32 *sectors,
87 unsigned char *cdb)
88{
89 put_unaligned_be64(lba, &cdb[2]);
90 put_unaligned_be32(*sectors, &cdb[10]);
91}
92
93/*
94 * split_cdb_XX_32():
95 *
96 * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
97 */
98void split_cdb_XX_32(
99 unsigned long long lba,
100 u32 *sectors,
101 unsigned char *cdb)
102{
103 put_unaligned_be64(lba, &cdb[12]);
104 put_unaligned_be32(*sectors, &cdb[28]);
105}
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
new file mode 100644
index 000000000000..98cd1c01ed83
--- /dev/null
+++ b/drivers/target/target_core_scdb.h
@@ -0,0 +1,10 @@
#ifndef TARGET_CORE_SCDB_H
#define TARGET_CORE_SCDB_H

/*
 * Helpers that encode an LBA and sector count into the fixed byte
 * positions of 6/10/12/16/32-byte SCSI CDBs; implemented in
 * target_core_scdb.c.  Arguments: (lba, sectors, cdb).
 */
extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);

#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
new file mode 100644
index 000000000000..158cecbec718
--- /dev/null
+++ b/drivers/target/target_core_tmr.c
@@ -0,0 +1,404 @@
1/*******************************************************************************
2 * Filename: target_core_tmr.c
3 *
4 * This file contains SPC-3 task management infrastructure
5 *
6 * Copyright (c) 2009,2010 Rising Tide Systems
7 * Copyright (c) 2009,2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27#include <linux/version.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <linux/list.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33
34#include <target/target_core_base.h>
35#include <target/target_core_device.h>
36#include <target/target_core_tmr.h>
37#include <target/target_core_transport.h>
38#include <target/target_core_fabric_ops.h>
39#include <target/target_core_configfs.h>
40
41#include "target_core_alua.h"
42#include "target_core_pr.h"
43
44#define DEBUG_LUN_RESET
45#ifdef DEBUG_LUN_RESET
46#define DEBUG_LR(x...) printk(KERN_INFO x)
47#else
48#define DEBUG_LR(x...)
49#endif
50
51struct se_tmr_req *core_tmr_alloc_req(
52 struct se_cmd *se_cmd,
53 void *fabric_tmr_ptr,
54 u8 function)
55{
56 struct se_tmr_req *tmr;
57
58 tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
59 if (!(tmr)) {
60 printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
61 return ERR_PTR(-ENOMEM);
62 }
63 tmr->task_cmd = se_cmd;
64 tmr->fabric_tmr_ptr = fabric_tmr_ptr;
65 tmr->function = function;
66 INIT_LIST_HEAD(&tmr->tmr_list);
67
68 return tmr;
69}
70EXPORT_SYMBOL(core_tmr_alloc_req);
71
72void core_tmr_release_req(
73 struct se_tmr_req *tmr)
74{
75 struct se_device *dev = tmr->tmr_dev;
76
77 spin_lock(&dev->se_tmr_lock);
78 list_del(&tmr->tmr_list);
79 kmem_cache_free(se_tmr_req_cache, tmr);
80 spin_unlock(&dev->se_tmr_lock);
81}
82
/* core_tmr_handle_tas_abort():
 *
 * Complete the abort of @cmd, optionally emitting TASK ABORTED SAM
 * status first.  @fe_count is the number of outstanding frontend
 * (fabric) references held on the command.
 */
static void core_tmr_handle_tas_abort(
	struct se_node_acl *tmr_nacl,
	struct se_cmd *cmd,
	int tas,
	int fe_count)
{
	if (!(fe_count)) {
		/* No fabric references remain: release the command outright */
		transport_cmd_finish_abort(cmd, 1);
		return;
	}
	/*
	 * TASK ABORTED status (TAS) bit support
	 *
	 * NOTE(review): SAM-4 describes TAS as applying to commands aborted
	 * by a *different* I_T nexus than the one they arrived on; the
	 * equality test below fires when the TMR's nexus matches the
	 * command's -- confirm this condition is intended.
	 */
	if (((tmr_nacl != NULL) &&
	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
		transport_send_task_abort(cmd);

	transport_cmd_finish_abort(cmd, 0);
}
102
/*
 * core_tmr_lun_reset():
 *
 * Perform a LUN_RESET for @dev in three phases: (1) release all other
 * pending TMRs, (2) abort every outstanding struct se_task on the
 * device state list, (3) drain the device command queue.  When invoked
 * for PROUT PREEMPT_AND_ABORT, @preempt_and_abort_list restricts the
 * reset to commands with matching registration keys and @prout_cmd
 * itself is never aborted.  Returns 0.
 */
int core_tmr_lun_reset(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list,
	struct se_cmd *prout_cmd)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr, *qr_tmp;
	struct se_node_acl *tmr_nacl = NULL;
	struct se_portal_group *tmr_tpg = NULL;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_tmr_req *tmr_p, *tmr_pp;
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int fe_count, state, tas;
	/*
	 * TASK_ABORTED status bit, this is configurable via ConfigFS
	 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	tas = DEV_ATTRIB(dev)->emulate_tas;
	/*
	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough..
	 */
	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
		if (tmr_nacl && tmr_tpg) {
			DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
				" initiator port %s\n",
				TPG_TFO(tmr_tpg)->get_fabric_name(),
				tmr_nacl->initiatorname);
		}
	}
	DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		TRANSPORT(dev)->name, tas);
	/*
	 * Phase 1: Release all pending and outgoing TMRs aside from the
	 * received LUN_RESET tmr..
	 */
	spin_lock(&dev->se_tmr_lock);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		/*
		 * Allow the received TMR to return with FUNCTION_COMPLETE.
		 */
		if (tmr && (tmr_p == tmr))
			continue;

		cmd = tmr_p->task_cmd;
		if (!(cmd)) {
			printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
			continue;
		}
		/*
		 * If this function was called with a valid pr_res_key
		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
		 * skip non regisration key matching TMRs.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * se_tmr_lock is dropped around the per-command work below
		 * and re-taken before the next iteration; the _safe iterator
		 * keeps tmr_pp usable across the gap.
		 */
		spin_unlock(&dev->se_tmr_lock);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
			spin_lock(&dev->se_tmr_lock);
			continue;
		}
		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
			spin_lock(&dev->se_tmr_lock);
			continue;
		}
		DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
			" Response: 0x%02x, t_state: %d\n",
			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
			tmr_p->function, tmr_p->response, cmd->t_state);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		transport_cmd_finish_abort_tmr(cmd);
		spin_lock(&dev->se_tmr_lock);
	}
	spin_unlock(&dev->se_tmr_lock);
	/*
	 * Phase 2:
	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
	 * for TMR LUN_RESET:
	 *
	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
	 * other than the one that caused the SCSI device condition is
	 * completed with TASK ABORTED status, if the TAS bit is set to one in
	 * the Control mode page (see SPC-4). "No" indicates that no status is
	 * returned for aborted commands.
	 *
	 * d) If the logical unit reset is caused by a particular I_T nexus
	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
	 * (TASK_ABORTED status) applies.
	 *
	 * Otherwise (e.g., if triggered by a hard reset), "no"
	 * (no TASK_ABORTED SAM status) applies.
	 *
	 * Note that this seems to be independent of TAS (Task Aborted Status)
	 * in the Control Mode Page.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
				t_state_list) {
		if (!(TASK_CMD(task))) {
			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
			continue;
		}
		cmd = TASK_CMD(task);

		if (!T_TASK(cmd)) {
			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
				" %p ITT: 0x%08x\n", task, cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
			continue;
		}
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		/* Detach from the device state list; execute_task_lock is
		 * dropped while this command is worked on below. */
		list_del(&task->t_state_list);
		atomic_set(&task->task_state_active, 0);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
			"def_t_state: %d/%d cdb: 0x%02x\n",
			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
			CMD_TFO(cmd)->get_task_tag(cmd), 0,
			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
			cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
			" t_task_cdbs: %d t_task_cdbs_left: %d"
			" t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
			T_TASK(cmd)->t_task_cdbs,
			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
			atomic_read(&T_TASK(cmd)->t_transport_active),
			atomic_read(&T_TASK(cmd)->t_transport_stop),
			atomic_read(&T_TASK(cmd)->t_transport_sent));

		if (atomic_read(&task->task_active)) {
			/* Task is executing: signal it to stop and wait for
			 * the backend to acknowledge via task_stop_comp. */
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
				" for dev: %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
				" dev: %p\n", task, dev);
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		}
		__transport_stop_task_timer(task, &flags);

		/* Only the last task of a command proceeds to abort it */
		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);
			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);

		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
				" task: %p, t_fe_count: %d dev: %p\n", task,
				fe_count, dev);
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
						flags);
			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

		spin_lock_irqsave(&dev->execute_task_lock, flags);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	/*
	 * Phase 3:
	 * Release all commands remaining in the struct se_device cmd queue.
	 *
	 * This follows the same logic as above for the struct se_device
	 * struct se_task state list, where commands are returned with
	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
	 * reference, otherwise the struct se_cmd is released.
	 */
	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
		cmd = (struct se_cmd *)qr->cmd;
		if (!(cmd)) {
			/*
			 * Skip these for non PREEMPT_AND_ABORT usage..
			 */
			if (preempt_and_abort_list != NULL)
				continue;

			atomic_dec(&qobj->queue_cnt);
			list_del(&qr->qr_list);
			kfree(qr);
			continue;
		}
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

		state = qr->state;
		kfree(qr);

		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
			"Preempt" : "", cmd, state,
			atomic_read(&T_TASK(cmd)->t_fe_count));
		/*
		 * Signal that the command has failed via cmd->se_cmd_flags,
		 * and call TFO->new_cmd_failure() to wakeup any fabric
		 * dependent code used to wait for unsolicited data out
		 * allocation to complete.  The fabric module is expected
		 * to dump any remaining unsolicited data out for the aborted
		 * command at this point.
		 */
		transport_new_cmd_failure(cmd);

		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
				atomic_read(&T_TASK(cmd)->t_fe_count));
		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
	/*
	 * Clear any legacy SPC-2 reservation when called during
	 * LOGICAL UNIT RESET
	 */
	if (!(preempt_and_abort_list) &&
	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
		spin_lock(&dev->dev_reservation_lock);
		dev->dev_reserved_node_acl = NULL;
		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
		spin_unlock(&dev->dev_reservation_lock);
		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
	}

	spin_lock(&dev->stats_lock);
	dev->num_resets++;
	spin_unlock(&dev->stats_lock);

	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		TRANSPORT(dev)->name);
	return 0;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
new file mode 100644
index 000000000000..abfa81a57115
--- /dev/null
+++ b/drivers/target/target_core_tpg.c
@@ -0,0 +1,826 @@
1/*******************************************************************************
2 * Filename: target_core_tpg.c
3 *
4 * This file contains generic Target Portal Group related functions.
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/net.h>
30#include <linux/string.h>
31#include <linux/timer.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/smp_lock.h>
35#include <linux/in.h>
36#include <net/sock.h>
37#include <net/tcp.h>
38#include <scsi/scsi.h>
39#include <scsi/scsi_cmnd.h>
40
41#include <target/target_core_base.h>
42#include <target/target_core_device.h>
43#include <target/target_core_tpg.h>
44#include <target/target_core_transport.h>
45#include <target/target_core_fabric_ops.h>
46
47#include "target_core_hba.h"
48
49/* core_clear_initiator_node_from_tpg():
50 *
51 *
52 */
53static void core_clear_initiator_node_from_tpg(
54 struct se_node_acl *nacl,
55 struct se_portal_group *tpg)
56{
57 int i;
58 struct se_dev_entry *deve;
59 struct se_lun *lun;
60 struct se_lun_acl *acl, *acl_tmp;
61
62 spin_lock_irq(&nacl->device_list_lock);
63 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
64 deve = &nacl->device_list[i];
65
66 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
67 continue;
68
69 if (!deve->se_lun) {
70 printk(KERN_ERR "%s device entries device pointer is"
71 " NULL, but Initiator has access.\n",
72 TPG_TFO(tpg)->get_fabric_name());
73 continue;
74 }
75
76 lun = deve->se_lun;
77 spin_unlock_irq(&nacl->device_list_lock);
78 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
79 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
80
81 spin_lock(&lun->lun_acl_lock);
82 list_for_each_entry_safe(acl, acl_tmp,
83 &lun->lun_acl_list, lacl_list) {
84 if (!(strcmp(acl->initiatorname,
85 nacl->initiatorname)) &&
86 (acl->mapped_lun == deve->mapped_lun))
87 break;
88 }
89
90 if (!acl) {
91 printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
92 " mapped_lun: %u\n", nacl->initiatorname,
93 deve->mapped_lun);
94 spin_unlock(&lun->lun_acl_lock);
95 spin_lock_irq(&nacl->device_list_lock);
96 continue;
97 }
98
99 list_del(&acl->lacl_list);
100 spin_unlock(&lun->lun_acl_lock);
101
102 spin_lock_irq(&nacl->device_list_lock);
103 kfree(acl);
104 }
105 spin_unlock_irq(&nacl->device_list_lock);
106}
107
108/* __core_tpg_get_initiator_node_acl():
109 *
110 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
111 */
112struct se_node_acl *__core_tpg_get_initiator_node_acl(
113 struct se_portal_group *tpg,
114 const char *initiatorname)
115{
116 struct se_node_acl *acl;
117
118 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
119 if (!(strcmp(acl->initiatorname, initiatorname)))
120 return acl;
121 }
122
123 return NULL;
124}
125
126/* core_tpg_get_initiator_node_acl():
127 *
128 *
129 */
130struct se_node_acl *core_tpg_get_initiator_node_acl(
131 struct se_portal_group *tpg,
132 unsigned char *initiatorname)
133{
134 struct se_node_acl *acl;
135
136 spin_lock_bh(&tpg->acl_node_lock);
137 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
138 if (!(strcmp(acl->initiatorname, initiatorname)) &&
139 (!(acl->dynamic_node_acl))) {
140 spin_unlock_bh(&tpg->acl_node_lock);
141 return acl;
142 }
143 }
144 spin_unlock_bh(&tpg->acl_node_lock);
145
146 return NULL;
147}
148
/* core_tpg_add_node_to_devs():
 *
 * Demo-mode setup: walk every ACTIVE LUN in the TPG and grant the new
 * node ACL access to it, read-write or read-only depending on the
 * fabric's demo_mode_write_protect setting and the device type.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		/*
		 * Drop tpg_lun_lock across core_update_device_list_for_node()
		 * below, which takes other locks; re-taken at loop bottom.
		 */
		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
204
205/* core_set_queue_depth_for_node():
206 *
207 *
208 */
209static int core_set_queue_depth_for_node(
210 struct se_portal_group *tpg,
211 struct se_node_acl *acl)
212{
213 if (!acl->queue_depth) {
214 printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
215 "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
216 acl->initiatorname);
217 acl->queue_depth = 1;
218 }
219
220 return 0;
221}
222
223/* core_create_device_list_for_node():
224 *
225 *
226 */
227static int core_create_device_list_for_node(struct se_node_acl *nacl)
228{
229 struct se_dev_entry *deve;
230 int i;
231
232 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
233 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
234 if (!(nacl->device_list)) {
235 printk(KERN_ERR "Unable to allocate memory for"
236 " struct se_node_acl->device_list\n");
237 return -1;
238 }
239 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
240 deve = &nacl->device_list[i];
241
242 atomic_set(&deve->ua_count, 0);
243 atomic_set(&deve->pr_ref_count, 0);
244 spin_lock_init(&deve->ua_lock);
245 INIT_LIST_HEAD(&deve->alua_port_list);
246 INIT_LIST_HEAD(&deve->ua_list);
247 }
248
249 return 0;
250}
251
252/* core_tpg_check_initiator_node_acl()
253 *
254 *
255 */
256struct se_node_acl *core_tpg_check_initiator_node_acl(
257 struct se_portal_group *tpg,
258 unsigned char *initiatorname)
259{
260 struct se_node_acl *acl;
261
262 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
263 if ((acl))
264 return acl;
265
266 if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
267 return NULL;
268
269 acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
270 if (!(acl))
271 return NULL;
272
273 INIT_LIST_HEAD(&acl->acl_list);
274 INIT_LIST_HEAD(&acl->acl_sess_list);
275 spin_lock_init(&acl->device_list_lock);
276 spin_lock_init(&acl->nacl_sess_lock);
277 atomic_set(&acl->acl_pr_ref_count, 0);
278 atomic_set(&acl->mib_ref_count, 0);
279 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
280 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
281 acl->se_tpg = tpg;
282 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
283 spin_lock_init(&acl->stats_lock);
284 acl->dynamic_node_acl = 1;
285
286 TPG_TFO(tpg)->set_default_node_attributes(acl);
287
288 if (core_create_device_list_for_node(acl) < 0) {
289 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
290 return NULL;
291 }
292
293 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
294 core_free_device_list_for_node(acl, tpg);
295 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
296 return NULL;
297 }
298
299 core_tpg_add_node_to_devs(acl, tpg);
300
301 spin_lock_bh(&tpg->acl_node_lock);
302 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
303 tpg->num_node_acls++;
304 spin_unlock_bh(&tpg->acl_node_lock);
305
306 printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
307 " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
308 TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
309 TPG_TFO(tpg)->get_fabric_name(), initiatorname);
310
311 return acl;
312}
313EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
314
315void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
316{
317 while (atomic_read(&nacl->acl_pr_ref_count) != 0)
318 cpu_relax();
319}
320
321void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
322{
323 while (atomic_read(&nacl->mib_ref_count) != 0)
324 cpu_relax();
325}
326
327void core_tpg_clear_object_luns(struct se_portal_group *tpg)
328{
329 int i, ret;
330 struct se_lun *lun;
331
332 spin_lock(&tpg->tpg_lun_lock);
333 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
334 lun = &tpg->tpg_lun_list[i];
335
336 if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
337 (lun->lun_se_dev == NULL))
338 continue;
339
340 spin_unlock(&tpg->tpg_lun_lock);
341 ret = core_dev_del_lun(tpg, lun->unpacked_lun);
342 spin_lock(&tpg->tpg_lun_lock);
343 }
344 spin_unlock(&tpg->tpg_lun_lock);
345}
346EXPORT_SYMBOL(core_tpg_clear_object_luns);
347
/* core_tpg_add_initiator_node_acl():
 *
 * Register an explicit (configfs-created) node ACL for @initiatorname
 * on @tpg.  If a dynamic (demo-mode) ACL already exists it is converted
 * in place to an explicit ACL and returned; an existing explicit ACL is
 * an error (-EEXIST).  @se_nacl, when non-NULL, is the fabric-allocated
 * ACL that becomes the registered entry.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if ((acl)) {
		if (acl->dynamic_node_acl) {
			/* Promote the existing demo-mode ACL to explicit. */
			acl->dynamic_node_acl = 0;
			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
			spin_unlock_bh(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
					se_nacl);
			goto done;
		}

		printk(KERN_ERR "ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	if (!(se_nacl)) {
		printk("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	/* Error paths release the fabric ACL; caller must not reuse it. */
	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

done:
	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
438
/* core_tpg_del_initiator_node_acl():
 *
 * Remove @acl from @tpg: unlink it from the ACL list, force-shutdown
 * any sessions still bound to it, wait for outstanding PR/MIB
 * references, then release its LUN mappings and device list.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	/* NOTE(review): dynamic_acl is set below but never read again. */
	int dynamic_acl = 0;

	spin_lock_bh(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		/*
		 * Drop session_lock across close_session(); the safe
		 * iterator keeps the walk valid across the unlock window.
		 */
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		TPG_TFO(tpg)->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	/* Spin until all PR and MIB references to this ACL are gone. */
	core_tpg_wait_for_nacl_pr_ref(acl);
	core_tpg_wait_for_mib_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
495
/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the TCQ queue depth for an existing node ACL.  While a session
 * for the initiator is active the change requires force=1 and triggers
 * session reinstatement via shutdown_session()/close_session().
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(acl)) {
		printk(KERN_ERR "Access Control List entry for %s Initiator"
			" Node %s does not exists for TPG %hu, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return -ENODEV;
	}
	/*
	 * Temporarily mark a dynamic ACL as explicit for the duration of
	 * the update; every exit path below restores the flag.
	 */
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			printk(KERN_ERR "Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_bh(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_bh(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		/* Remember the session so we can close it after the update. */
		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for a Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call TPG_TFO(tpg)->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already the set session reinstatement
		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
		 */
		if (init_sess)
			TPG_TFO(tpg)->close_session(init_sess);

		spin_lock_bh(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_bh(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		TPG_TFO(tpg)->close_session(init_sess);

	printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg));

	spin_lock_bh(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_bh(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
605
606static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
607{
608 /* Set in core_dev_setup_virtual_lun0() */
609 struct se_device *dev = se_global->g_lun0_dev;
610 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
611 u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
612 int ret;
613
614 lun->unpacked_lun = 0;
615 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
616 atomic_set(&lun->lun_acl_count, 0);
617 init_completion(&lun->lun_shutdown_comp);
618 INIT_LIST_HEAD(&lun->lun_acl_list);
619 INIT_LIST_HEAD(&lun->lun_cmd_list);
620 spin_lock_init(&lun->lun_acl_lock);
621 spin_lock_init(&lun->lun_cmd_lock);
622 spin_lock_init(&lun->lun_sep_lock);
623
624 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
625 if (ret < 0)
626 return -1;
627
628 return 0;
629}
630
631static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
632{
633 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
634
635 core_tpg_post_dellun(se_tpg, lun);
636}
637
638int core_tpg_register(
639 struct target_core_fabric_ops *tfo,
640 struct se_wwn *se_wwn,
641 struct se_portal_group *se_tpg,
642 void *tpg_fabric_ptr,
643 int se_tpg_type)
644{
645 struct se_lun *lun;
646 u32 i;
647
648 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
649 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
650 if (!(se_tpg->tpg_lun_list)) {
651 printk(KERN_ERR "Unable to allocate struct se_portal_group->"
652 "tpg_lun_list\n");
653 return -ENOMEM;
654 }
655
656 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
657 lun = &se_tpg->tpg_lun_list[i];
658 lun->unpacked_lun = i;
659 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
660 atomic_set(&lun->lun_acl_count, 0);
661 init_completion(&lun->lun_shutdown_comp);
662 INIT_LIST_HEAD(&lun->lun_acl_list);
663 INIT_LIST_HEAD(&lun->lun_cmd_list);
664 spin_lock_init(&lun->lun_acl_lock);
665 spin_lock_init(&lun->lun_cmd_lock);
666 spin_lock_init(&lun->lun_sep_lock);
667 }
668
669 se_tpg->se_tpg_type = se_tpg_type;
670 se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
671 se_tpg->se_tpg_tfo = tfo;
672 se_tpg->se_tpg_wwn = se_wwn;
673 atomic_set(&se_tpg->tpg_pr_ref_count, 0);
674 INIT_LIST_HEAD(&se_tpg->acl_node_list);
675 INIT_LIST_HEAD(&se_tpg->se_tpg_list);
676 INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
677 spin_lock_init(&se_tpg->acl_node_lock);
678 spin_lock_init(&se_tpg->session_lock);
679 spin_lock_init(&se_tpg->tpg_lun_lock);
680
681 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
682 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
683 kfree(se_tpg);
684 return -ENOMEM;
685 }
686 }
687
688 spin_lock_bh(&se_global->se_tpg_lock);
689 list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
690 spin_unlock_bh(&se_global->se_tpg_lock);
691
692 printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
693 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
694 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
695 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
696 "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
697
698 return 0;
699}
700EXPORT_SYMBOL(core_tpg_register);
701
702int core_tpg_deregister(struct se_portal_group *se_tpg)
703{
704 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
705 " for endpoint: %s Portal Tag %u\n",
706 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
707 "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
708 TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
709 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
710
711 spin_lock_bh(&se_global->se_tpg_lock);
712 list_del(&se_tpg->se_tpg_list);
713 spin_unlock_bh(&se_global->se_tpg_lock);
714
715 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
716 cpu_relax();
717
718 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
719 core_tpg_release_virtual_lun0(se_tpg);
720
721 se_tpg->se_tpg_fabric_ptr = NULL;
722 kfree(se_tpg->tpg_lun_list);
723 return 0;
724}
725EXPORT_SYMBOL(core_tpg_deregister);
726
727struct se_lun *core_tpg_pre_addlun(
728 struct se_portal_group *tpg,
729 u32 unpacked_lun)
730{
731 struct se_lun *lun;
732
733 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
734 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
735 "-1: %u for Target Portal Group: %u\n",
736 TPG_TFO(tpg)->get_fabric_name(),
737 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
738 TPG_TFO(tpg)->tpg_get_tag(tpg));
739 return ERR_PTR(-EOVERFLOW);
740 }
741
742 spin_lock(&tpg->tpg_lun_lock);
743 lun = &tpg->tpg_lun_list[unpacked_lun];
744 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
745 printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
746 " on %s Target Portal Group: %u, ignoring request.\n",
747 unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
748 TPG_TFO(tpg)->tpg_get_tag(tpg));
749 spin_unlock(&tpg->tpg_lun_lock);
750 return ERR_PTR(-EINVAL);
751 }
752 spin_unlock(&tpg->tpg_lun_lock);
753
754 return lun;
755}
756
757int core_tpg_post_addlun(
758 struct se_portal_group *tpg,
759 struct se_lun *lun,
760 u32 lun_access,
761 void *lun_ptr)
762{
763 if (core_dev_export(lun_ptr, tpg, lun) < 0)
764 return -1;
765
766 spin_lock(&tpg->tpg_lun_lock);
767 lun->lun_access = lun_access;
768 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
769 spin_unlock(&tpg->tpg_lun_lock);
770
771 return 0;
772}
773
/*
 * Quiesce a LUN prior to deletion: first clear all node ACL mappings
 * that reference it, then clear it from active sessions' command state.
 * The two calls must run in this order.
 */
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
781
782struct se_lun *core_tpg_pre_dellun(
783 struct se_portal_group *tpg,
784 u32 unpacked_lun,
785 int *ret)
786{
787 struct se_lun *lun;
788
789 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
790 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
791 "-1: %u for Target Portal Group: %u\n",
792 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
793 TRANSPORT_MAX_LUNS_PER_TPG-1,
794 TPG_TFO(tpg)->tpg_get_tag(tpg));
795 return ERR_PTR(-EOVERFLOW);
796 }
797
798 spin_lock(&tpg->tpg_lun_lock);
799 lun = &tpg->tpg_lun_list[unpacked_lun];
800 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
801 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
802 " Target Portal Group: %u, ignoring request.\n",
803 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
804 TPG_TFO(tpg)->tpg_get_tag(tpg));
805 spin_unlock(&tpg->tpg_lun_lock);
806 return ERR_PTR(-ENODEV);
807 }
808 spin_unlock(&tpg->tpg_lun_lock);
809
810 return lun;
811}
812
/*
 * Complete LUN removal: quiesce the LUN, unexport its backing device,
 * and mark the slot free.  Shutdown must precede unexport.
 */
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
new file mode 100644
index 000000000000..28b6292ff298
--- /dev/null
+++ b/drivers/target/target_core_transport.c
@@ -0,0 +1,6134 @@
1/*******************************************************************************
2 * Filename: target_core_transport.c
3 *
4 * This file contains the Generic Target Engine Core.
5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 *
11 * Nicholas A. Bellinger <nab@kernel.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 *
27 ******************************************************************************/
28
29#include <linux/version.h>
30#include <linux/net.h>
31#include <linux/delay.h>
32#include <linux/string.h>
33#include <linux/timer.h>
34#include <linux/slab.h>
35#include <linux/blkdev.h>
36#include <linux/spinlock.h>
37#include <linux/smp_lock.h>
38#include <linux/kthread.h>
39#include <linux/in.h>
40#include <linux/cdrom.h>
41#include <asm/unaligned.h>
42#include <net/sock.h>
43#include <net/tcp.h>
44#include <scsi/scsi.h>
45#include <scsi/scsi_cmnd.h>
46#include <scsi/libsas.h> /* For TASK_ATTR_* */
47
48#include <target/target_core_base.h>
49#include <target/target_core_device.h>
50#include <target/target_core_tmr.h>
51#include <target/target_core_tpg.h>
52#include <target/target_core_transport.h>
53#include <target/target_core_fabric_ops.h>
54#include <target/target_core_configfs.h>
55
56#include "target_core_alua.h"
57#include "target_core_hba.h"
58#include "target_core_pr.h"
59#include "target_core_scdb.h"
60#include "target_core_ua.h"
61
62/* #define DEBUG_CDB_HANDLER */
63#ifdef DEBUG_CDB_HANDLER
64#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
65#else
66#define DEBUG_CDB_H(x...)
67#endif
68
69/* #define DEBUG_CMD_MAP */
70#ifdef DEBUG_CMD_MAP
71#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
72#else
73#define DEBUG_CMD_M(x...)
74#endif
75
76/* #define DEBUG_MEM_ALLOC */
77#ifdef DEBUG_MEM_ALLOC
78#define DEBUG_MEM(x...) printk(KERN_INFO x)
79#else
80#define DEBUG_MEM(x...)
81#endif
82
83/* #define DEBUG_MEM2_ALLOC */
84#ifdef DEBUG_MEM2_ALLOC
85#define DEBUG_MEM2(x...) printk(KERN_INFO x)
86#else
87#define DEBUG_MEM2(x...)
88#endif
89
90/* #define DEBUG_SG_CALC */
91#ifdef DEBUG_SG_CALC
92#define DEBUG_SC(x...) printk(KERN_INFO x)
93#else
94#define DEBUG_SC(x...)
95#endif
96
97/* #define DEBUG_SE_OBJ */
98#ifdef DEBUG_SE_OBJ
99#define DEBUG_SO(x...) printk(KERN_INFO x)
100#else
101#define DEBUG_SO(x...)
102#endif
103
104/* #define DEBUG_CMD_VOL */
105#ifdef DEBUG_CMD_VOL
106#define DEBUG_VOL(x...) printk(KERN_INFO x)
107#else
108#define DEBUG_VOL(x...)
109#endif
110
111/* #define DEBUG_CMD_STOP */
112#ifdef DEBUG_CMD_STOP
113#define DEBUG_CS(x...) printk(KERN_INFO x)
114#else
115#define DEBUG_CS(x...)
116#endif
117
118/* #define DEBUG_PASSTHROUGH */
119#ifdef DEBUG_PASSTHROUGH
120#define DEBUG_PT(x...) printk(KERN_INFO x)
121#else
122#define DEBUG_PT(x...)
123#endif
124
125/* #define DEBUG_TASK_STOP */
126#ifdef DEBUG_TASK_STOP
127#define DEBUG_TS(x...) printk(KERN_INFO x)
128#else
129#define DEBUG_TS(x...)
130#endif
131
132/* #define DEBUG_TRANSPORT_STOP */
133#ifdef DEBUG_TRANSPORT_STOP
134#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
135#else
136#define DEBUG_TRANSPORT_S(x...)
137#endif
138
139/* #define DEBUG_TASK_FAILURE */
140#ifdef DEBUG_TASK_FAILURE
141#define DEBUG_TF(x...) printk(KERN_INFO x)
142#else
143#define DEBUG_TF(x...)
144#endif
145
146/* #define DEBUG_DEV_OFFLINE */
147#ifdef DEBUG_DEV_OFFLINE
148#define DEBUG_DO(x...) printk(KERN_INFO x)
149#else
150#define DEBUG_DO(x...)
151#endif
152
153/* #define DEBUG_TASK_STATE */
154#ifdef DEBUG_TASK_STATE
155#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
156#else
157#define DEBUG_TSTATE(x...)
158#endif
159
160/* #define DEBUG_STATUS_THR */
161#ifdef DEBUG_STATUS_THR
162#define DEBUG_ST(x...) printk(KERN_INFO x)
163#else
164#define DEBUG_ST(x...)
165#endif
166
167/* #define DEBUG_TASK_TIMEOUT */
168#ifdef DEBUG_TASK_TIMEOUT
169#define DEBUG_TT(x...) printk(KERN_INFO x)
170#else
171#define DEBUG_TT(x...)
172#endif
173
174/* #define DEBUG_GENERIC_REQUEST_FAILURE */
175#ifdef DEBUG_GENERIC_REQUEST_FAILURE
176#define DEBUG_GRF(x...) printk(KERN_INFO x)
177#else
178#define DEBUG_GRF(x...)
179#endif
180
181/* #define DEBUG_SAM_TASK_ATTRS */
182#ifdef DEBUG_SAM_TASK_ATTRS
183#define DEBUG_STA(x...) printk(KERN_INFO x)
184#else
185#define DEBUG_STA(x...)
186#endif
187
188struct se_global *se_global;
189
190static struct kmem_cache *se_cmd_cache;
191static struct kmem_cache *se_sess_cache;
192struct kmem_cache *se_tmr_req_cache;
193struct kmem_cache *se_ua_cache;
194struct kmem_cache *se_mem_cache;
195struct kmem_cache *t10_pr_reg_cache;
196struct kmem_cache *t10_alua_lu_gp_cache;
197struct kmem_cache *t10_alua_lu_gp_mem_cache;
198struct kmem_cache *t10_alua_tg_pt_gp_cache;
199struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
200
201/* Used for transport_dev_get_map_*() */
202typedef int (*map_func_t)(struct se_task *, u32);
203
204static int transport_generic_write_pending(struct se_cmd *);
205static int transport_processing_thread(void *);
206static int __transport_execute_tasks(struct se_device *dev);
207static void transport_complete_task_attr(struct se_cmd *cmd);
208static void transport_direct_request_timeout(struct se_cmd *cmd);
209static void transport_free_dev_tasks(struct se_cmd *cmd);
210static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
211 unsigned long long starting_lba, u32 sectors,
212 enum dma_data_direction data_direction,
213 struct list_head *mem_list, int set_counts);
214static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
215 u32 dma_size);
216static int transport_generic_remove(struct se_cmd *cmd,
217 int release_to_pool, int session_reinstatement);
218static int transport_get_sectors(struct se_cmd *cmd);
219static struct list_head *transport_init_se_mem_list(void);
220static int transport_map_sg_to_mem(struct se_cmd *cmd,
221 struct list_head *se_mem_list, void *in_mem,
222 u32 *se_mem_cnt);
223static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
224 unsigned char *dst, struct list_head *se_mem_list);
225static void transport_release_fe_cmd(struct se_cmd *cmd);
226static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
227 struct se_queue_obj *qobj);
228static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
229static void transport_stop_all_task_timers(struct se_cmd *cmd);
230
231int transport_emulate_control_cdb(struct se_task *task);
232
233int init_se_global(void)
234{
235 struct se_global *global;
236
237 global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
238 if (!(global)) {
239 printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
240 return -1;
241 }
242
243 INIT_LIST_HEAD(&global->g_lu_gps_list);
244 INIT_LIST_HEAD(&global->g_se_tpg_list);
245 INIT_LIST_HEAD(&global->g_hba_list);
246 INIT_LIST_HEAD(&global->g_se_dev_list);
247 spin_lock_init(&global->g_device_lock);
248 spin_lock_init(&global->hba_lock);
249 spin_lock_init(&global->se_tpg_lock);
250 spin_lock_init(&global->lu_gps_lock);
251 spin_lock_init(&global->plugin_class_lock);
252
253 se_cmd_cache = kmem_cache_create("se_cmd_cache",
254 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
255 if (!(se_cmd_cache)) {
256 printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
257 goto out;
258 }
259 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
260 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
261 0, NULL);
262 if (!(se_tmr_req_cache)) {
263 printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
264 " failed\n");
265 goto out;
266 }
267 se_sess_cache = kmem_cache_create("se_sess_cache",
268 sizeof(struct se_session), __alignof__(struct se_session),
269 0, NULL);
270 if (!(se_sess_cache)) {
271 printk(KERN_ERR "kmem_cache_create() for struct se_session"
272 " failed\n");
273 goto out;
274 }
275 se_ua_cache = kmem_cache_create("se_ua_cache",
276 sizeof(struct se_ua), __alignof__(struct se_ua),
277 0, NULL);
278 if (!(se_ua_cache)) {
279 printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
280 goto out;
281 }
282 se_mem_cache = kmem_cache_create("se_mem_cache",
283 sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
284 if (!(se_mem_cache)) {
285 printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
286 goto out;
287 }
288 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
289 sizeof(struct t10_pr_registration),
290 __alignof__(struct t10_pr_registration), 0, NULL);
291 if (!(t10_pr_reg_cache)) {
292 printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
293 " failed\n");
294 goto out;
295 }
296 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
297 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
298 0, NULL);
299 if (!(t10_alua_lu_gp_cache)) {
300 printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
301 " failed\n");
302 goto out;
303 }
304 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
305 sizeof(struct t10_alua_lu_gp_member),
306 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
307 if (!(t10_alua_lu_gp_mem_cache)) {
308 printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
309 "cache failed\n");
310 goto out;
311 }
312 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
313 sizeof(struct t10_alua_tg_pt_gp),
314 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
315 if (!(t10_alua_tg_pt_gp_cache)) {
316 printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
317 "cache failed\n");
318 goto out;
319 }
320 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
321 "t10_alua_tg_pt_gp_mem_cache",
322 sizeof(struct t10_alua_tg_pt_gp_member),
323 __alignof__(struct t10_alua_tg_pt_gp_member),
324 0, NULL);
325 if (!(t10_alua_tg_pt_gp_mem_cache)) {
326 printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
327 "mem_t failed\n");
328 goto out;
329 }
330
331 se_global = global;
332
333 return 0;
334out:
335 if (se_cmd_cache)
336 kmem_cache_destroy(se_cmd_cache);
337 if (se_tmr_req_cache)
338 kmem_cache_destroy(se_tmr_req_cache);
339 if (se_sess_cache)
340 kmem_cache_destroy(se_sess_cache);
341 if (se_ua_cache)
342 kmem_cache_destroy(se_ua_cache);
343 if (se_mem_cache)
344 kmem_cache_destroy(se_mem_cache);
345 if (t10_pr_reg_cache)
346 kmem_cache_destroy(t10_pr_reg_cache);
347 if (t10_alua_lu_gp_cache)
348 kmem_cache_destroy(t10_alua_lu_gp_cache);
349 if (t10_alua_lu_gp_mem_cache)
350 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
351 if (t10_alua_tg_pt_gp_cache)
352 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
353 if (t10_alua_tg_pt_gp_mem_cache)
354 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
355 kfree(global);
356 return -1;
357}
358
359void release_se_global(void)
360{
361 struct se_global *global;
362
363 global = se_global;
364 if (!(global))
365 return;
366
367 kmem_cache_destroy(se_cmd_cache);
368 kmem_cache_destroy(se_tmr_req_cache);
369 kmem_cache_destroy(se_sess_cache);
370 kmem_cache_destroy(se_ua_cache);
371 kmem_cache_destroy(se_mem_cache);
372 kmem_cache_destroy(t10_pr_reg_cache);
373 kmem_cache_destroy(t10_alua_lu_gp_cache);
374 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
375 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
376 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
377 kfree(global);
378
379 se_global = NULL;
380}
381
382void transport_init_queue_obj(struct se_queue_obj *qobj)
383{
384 atomic_set(&qobj->queue_cnt, 0);
385 INIT_LIST_HEAD(&qobj->qobj_list);
386 init_waitqueue_head(&qobj->thread_wq);
387 spin_lock_init(&qobj->cmd_queue_lock);
388}
389EXPORT_SYMBOL(transport_init_queue_obj);
390
391static int transport_subsystem_reqmods(void)
392{
393 int ret;
394
395 ret = request_module("target_core_iblock");
396 if (ret != 0)
397 printk(KERN_ERR "Unable to load target_core_iblock\n");
398
399 ret = request_module("target_core_file");
400 if (ret != 0)
401 printk(KERN_ERR "Unable to load target_core_file\n");
402
403 ret = request_module("target_core_pscsi");
404 if (ret != 0)
405 printk(KERN_ERR "Unable to load target_core_pscsi\n");
406
407 ret = request_module("target_core_stgt");
408 if (ret != 0)
409 printk(KERN_ERR "Unable to load target_core_stgt\n");
410
411 return 0;
412}
413
414int transport_subsystem_check_init(void)
415{
416 if (se_global->g_sub_api_initialized)
417 return 0;
418 /*
419 * Request the loading of known TCM subsystem plugins..
420 */
421 if (transport_subsystem_reqmods() < 0)
422 return -1;
423
424 se_global->g_sub_api_initialized = 1;
425 return 0;
426}
427
428struct se_session *transport_init_session(void)
429{
430 struct se_session *se_sess;
431
432 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
433 if (!(se_sess)) {
434 printk(KERN_ERR "Unable to allocate struct se_session from"
435 " se_sess_cache\n");
436 return ERR_PTR(-ENOMEM);
437 }
438 INIT_LIST_HEAD(&se_sess->sess_list);
439 INIT_LIST_HEAD(&se_sess->sess_acl_list);
440 atomic_set(&se_sess->mib_ref_count, 0);
441
442 return se_sess;
443}
444EXPORT_SYMBOL(transport_init_session);
445
/*
 * Link @se_sess into @se_tpg's session list and, when @se_nacl is
 * non-NULL, bind it to that node ACL as the most recent active I_T Nexus.
 *
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			/* ISID kept as a big-endian 64-bit binary value */
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
493
494void transport_register_session(
495 struct se_portal_group *se_tpg,
496 struct se_node_acl *se_nacl,
497 struct se_session *se_sess,
498 void *fabric_sess_ptr)
499{
500 spin_lock_bh(&se_tpg->session_lock);
501 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
502 spin_unlock_bh(&se_tpg->session_lock);
503}
504EXPORT_SYMBOL(transport_register_session);
505
506void transport_deregister_session_configfs(struct se_session *se_sess)
507{
508 struct se_node_acl *se_nacl;
509
510 /*
511 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
512 */
513 se_nacl = se_sess->se_node_acl;
514 if ((se_nacl)) {
515 spin_lock_irq(&se_nacl->nacl_sess_lock);
516 list_del(&se_sess->sess_acl_list);
517 /*
518 * If the session list is empty, then clear the pointer.
519 * Otherwise, set the struct se_session pointer from the tail
520 * element of the per struct se_node_acl active session list.
521 */
522 if (list_empty(&se_nacl->acl_sess_list))
523 se_nacl->nacl_sess = NULL;
524 else {
525 se_nacl->nacl_sess = container_of(
526 se_nacl->acl_sess_list.prev,
527 struct se_session, sess_acl_list);
528 }
529 spin_unlock_irq(&se_nacl->nacl_sess_lock);
530 }
531}
532EXPORT_SYMBOL(transport_deregister_session_configfs);
533
/* Return @se_sess to the session slab cache; @se_sess must be unlinked. */
void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
539
/*
 * Fully deregister @se_sess: drop it from its TPG's session list,
 * release a dynamically generated node ACL if demo-mode caching is off,
 * and free the session.  Sessions with no TPG are simply freed.
 */
void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;

	if (!(se_tpg)) {
		transport_free_session(se_sess);
		return;
	}
	/*
	 * Wait for possible reference in drivers/target/target_core_mib.c:
	 * scsi_att_intr_port_seq_show()
	 *
	 * NOTE(review): this is a busy-wait with cpu_relax(); presumably
	 * the MIB reference is always short-lived — confirm before reuse.
	 */
	while (atomic_read(&se_sess->mib_ref_count) != 0)
		cpu_relax();

	spin_lock_bh(&se_tpg->session_lock);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_bh(&se_tpg->acl_node_lock);
		if (se_nacl->dynamic_node_acl) {
			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
					se_tpg))) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				/* Drop the lock around the blocking waits/frees */
				spin_unlock_bh(&se_tpg->acl_node_lock);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_tpg_wait_for_mib_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_bh(&se_tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&se_tpg->acl_node_lock);
	}

	transport_free_session(se_sess);

	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
		TPG_TFO(se_tpg)->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
593
/*
 * Remove every inactive task of @cmd from its device's state list and
 * decrement the command's t_task_cdbs_ex_left count accordingly.
 * Active tasks and tasks not on a state list are skipped.
 *
 * Called with T_TASK(cmd)->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	if (!T_TASK(cmd))
		return;

	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;
		if (!(dev))
			continue;

		/* Still executing: leave it on the device state list */
		if (atomic_read(&task->task_active))
			continue;

		/* Never added to (or already removed from) the state list */
		if (!(atomic_read(&task->task_state_active)))
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		list_del(&task->t_state_list);
		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
			CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		atomic_set(&task->task_state_active, 0);
		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
	}
}
627
/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if t_transport_active should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped (a waiter has been completed or the
 * fabric has taken over release), else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller in requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		/* Wake the LUN-shutdown waiter after dropping the lock */
		complete(&T_TASK(cmd)->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference now and
			 * struct se_cmd now.
			 */
			if (CMD_TFO(cmd)->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

				CMD_TFO(cmd)->check_stop_free(cmd);
				return 1;
			}
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return 0;
}
719
/* Check-stop variant for the transport_off == 2 handoff to the fabric. */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}
724
725static void transport_lun_remove_cmd(struct se_cmd *cmd)
726{
727 struct se_lun *lun = SE_LUN(cmd);
728 unsigned long flags;
729
730 if (!lun)
731 return;
732
733 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
734 if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
735 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
736 goto check_lun;
737 }
738 atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
739 transport_all_task_dev_remove_state(cmd);
740 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
741
742 transport_free_dev_tasks(cmd);
743
744check_lun:
745 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
746 if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
747 list_del(&cmd->se_lun_list);
748 atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
749#if 0
750 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
751 CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
752#endif
753 }
754 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
755}
756
/*
 * Finish an aborted command: dequeue it, detach it from its LUN, and —
 * unless a stop/check_stop_free waiter took ownership — optionally
 * release it via transport_generic_remove() when @remove is non-zero.
 */
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
	transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_generic_remove(cmd, 0, 0);
}
767
/*
 * TMR variant of transport_cmd_finish_abort(): no LUN detach (TMR
 * commands are not on a LUN command list) and the release is
 * unconditional unless a stop waiter took ownership.
 */
void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;

	transport_generic_remove(cmd, 0, 0);
}
777
/*
 * Queue @cmd onto its device's processing queue and wake the processing
 * thread.  A non-zero @t_state is stored as the command's new state and
 * marks the command transport-active.  Returns 0 on success, -1 when the
 * queue-request allocation fails.
 *
 * May be called from interrupt context (GFP_ATOMIC, irqsave locking).
 */
static int transport_add_cmd_to_queue(
	struct se_cmd *cmd,
	int t_state)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_queue_req *qr;
	unsigned long flags;

	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
	if (!(qr)) {
		printk(KERN_ERR "Unable to allocate memory for"
			" struct se_queue_req\n");
		return -1;
	}
	INIT_LIST_HEAD(&qr->qr_list);

	qr->cmd = (void *)cmd;
	qr->state = t_state;

	if (t_state) {
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_add_tail(&qr->qr_list, &qobj->qobj_list);
	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	atomic_inc(&qobj->queue_cnt);
	wake_up_interruptible(&qobj->thread_wq);
	return 0;
}
814
815/*
816 * Called with struct se_queue_obj->cmd_queue_lock held.
817 */
818static struct se_queue_req *
819__transport_get_qr_from_queue(struct se_queue_obj *qobj)
820{
821 struct se_cmd *cmd;
822 struct se_queue_req *qr = NULL;
823
824 if (list_empty(&qobj->qobj_list))
825 return NULL;
826
827 list_for_each_entry(qr, &qobj->qobj_list, qr_list)
828 break;
829
830 if (qr->cmd) {
831 cmd = (struct se_cmd *)qr->cmd;
832 atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
833 }
834 list_del(&qr->qr_list);
835 atomic_dec(&qobj->queue_cnt);
836
837 return qr;
838}
839
840static struct se_queue_req *
841transport_get_qr_from_queue(struct se_queue_obj *qobj)
842{
843 struct se_cmd *cmd;
844 struct se_queue_req *qr;
845 unsigned long flags;
846
847 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
848 if (list_empty(&qobj->qobj_list)) {
849 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
850 return NULL;
851 }
852
853 list_for_each_entry(qr, &qobj->qobj_list, qr_list)
854 break;
855
856 if (qr->cmd) {
857 cmd = (struct se_cmd *)qr->cmd;
858 atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
859 }
860 list_del(&qr->qr_list);
861 atomic_dec(&qobj->queue_cnt);
862 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
863
864 return qr;
865}
866
/*
 * Remove every queue request belonging to @cmd from @qobj and free it.
 * A no-op when the command has no queue-active references.  Logs an
 * error if any queue-active references remain afterwards (they should
 * all have been dropped while walking the list).
 */
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj)
{
	struct se_cmd *q_cmd;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}

	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
		q_cmd = (struct se_cmd *)qr->cmd;
		if (q_cmd != cmd)
			continue;

		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		kfree(qr);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
	}
}
898
/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 *
 * Marks the first (only) task of @cmd GOOD or CHECK CONDITION /
 * ILLEGAL REQUEST depending on @good, then runs normal task completion.
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	/* SYNCHRONIZE_CACHE emulation uses a single task per command */
	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
		TASK_CMD(task)->transport_error_status =
			PYX_TRANSPORT_ILLEGAL_REQUEST;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
921
/* transport_complete_task():
 *
 * Called from interrupt and non interrupt context depending
 * on the transport plugin.
 *
 * Releases the device/HBA queue-depth slot held by @task, collects
 * sense data, honors stop/timeout exception waiters, and — when this
 * is the last outstanding task of its command — queues the command to
 * the processing thread with the appropriate completion t_state.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct se_device *dev = task->se_dev;
	int t_state;
	unsigned long flags;
#if 0
	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
		T_TASK(cmd)->t_task_cdb[0], dev);
#endif
	/* Give the queue-depth slot back to the device and its HBA */
	if (dev) {
		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
		atomic_inc(&dev->depth_left);
		atomic_inc(&SE_HBA(dev)->left_queue_depth);
		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
	}

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&task->task_active, 0);

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_sense = 1;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (atomic_read(&task->task_stop)) {
		/*
		 * Decrement T_TASK(cmd)->t_se_count if this task had
		 * previously thrown its timeout exception handler.
		 */
		if (atomic_read(&task->task_timeout)) {
			atomic_dec(&T_TASK(cmd)->t_se_count);
			atomic_set(&task->task_timeout, 0);
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		/* Wake the stopper after the lock is dropped */
		complete(&task->task_stop_comp);
		return;
	}
	/*
	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
	 * left counter to determine when the struct se_cmd is ready to be queued to
	 * the processing thread.
	 */
	if (atomic_read(&task->task_timeout)) {
		if (!(atomic_dec_and_test(
				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
				flags);
			return;
		}
		t_state = TRANSPORT_COMPLETE_TIMEOUT;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		transport_add_cmd_to_queue(cmd, t_state);
		return;
	}
	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
		if (!success)
			T_TASK(cmd)->t_tasks_failed = 1;

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}

	if (!success || T_TASK(cmd)->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			cmd->transport_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);
1028
/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list are ready to be added to the active execution list
 * struct se_device
 *
 * Returns 1 when the task was placed at the head of the execution list
 * (HEAD_OF_QUEUE), 0 when appended to the tail.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}
1074
/* __transport_add_task_to_execute_queue():
 *
 * Add @task to the device execution list (honoring SAM task attribute
 * placement) and, if not already there, to the device state list at the
 * matching position.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	/* Already on the device state list: nothing more to do */
	if (atomic_read(&task->task_state_active))
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well. Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	atomic_set(&task->task_state_active, 1);

	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
		task, dev);
}
1109
/*
 * Add every not-yet-active task of @cmd to its device's state list.
 * Takes T_TASK(cmd)->t_state_lock, then nests each device's
 * execute_task_lock inside it.
 */
static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
			continue;

		spin_lock(&dev->execute_task_lock);
		list_add_tail(&task->t_state_list, &dev->state_task_list);
		atomic_set(&task->task_state_active, 1);

		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
			CMD_TFO(task->task_se_cmd)->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}
1135
1136static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
1137{
1138 struct se_device *dev = SE_DEV(cmd);
1139 struct se_task *task, *task_prev = NULL;
1140 unsigned long flags;
1141
1142 spin_lock_irqsave(&dev->execute_task_lock, flags);
1143 list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
1144 if (atomic_read(&task->task_execute_queue))
1145 continue;
1146 /*
1147 * __transport_add_task_to_execute_queue() handles the
1148 * SAM Task Attribute emulation if enabled
1149 */
1150 __transport_add_task_to_execute_queue(task, task_prev, dev);
1151 atomic_set(&task->task_execute_queue, 1);
1152 task_prev = task;
1153 }
1154 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
1155
1156 return;
1157}
1158
1159/* transport_get_task_from_execute_queue():
1160 *
1161 * Called with dev->execute_task_lock held.
1162 */
1163static struct se_task *
1164transport_get_task_from_execute_queue(struct se_device *dev)
1165{
1166 struct se_task *task;
1167
1168 if (list_empty(&dev->execute_task_list))
1169 return NULL;
1170
1171 list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
1172 break;
1173
1174 list_del(&task->t_execute_list);
1175 atomic_dec(&dev->execute_tasks);
1176
1177 return task;
1178}
1179
/* transport_remove_task_from_execute_queue():
 *
 * Remove @task from @dev's execution list under the execute_task_lock.
 */
static void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_del(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
1195
1196unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
1197{
1198 switch (cmd->data_direction) {
1199 case DMA_NONE:
1200 return "NONE";
1201 case DMA_FROM_DEVICE:
1202 return "READ";
1203 case DMA_TO_DEVICE:
1204 return "WRITE";
1205 case DMA_BIDIRECTIONAL:
1206 return "BIDI";
1207 default:
1208 break;
1209 }
1210
1211 return "UNKNOWN";
1212}
1213
/*
 * Append a one-line status summary for @dev to buffer @b, advancing
 * *@bl by the number of characters written.  The caller provides a
 * buffer large enough for the formatted output.
 */
void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
	*bl += sprintf(b + *bl, "        ");
}
1246
/* transport_release_all_cmds():
 *
 * Drain @dev's processing queue, releasing every still-queued command
 * directly via transport_release_fe_cmd().  The queue lock is dropped
 * around each release (which may sleep/do heavy work) and retaken, so
 * the list walk restarts safely from the head each iteration.
 */
static void transport_release_all_cmds(struct se_device *dev)
{
	struct se_cmd *cmd = NULL;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	int bug_out = 0, t_state;
	unsigned long flags;

	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
				qr_list) {

		cmd = (struct se_cmd *)qr->cmd;
		t_state = qr->state;
		list_del(&qr->qr_list);
		kfree(qr);
		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
				flags);

		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
			" t_state: %u directly\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);

		transport_release_fe_cmd(cmd);
		bug_out = 1;

		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
/* Debug aid: finding leftover commands here indicates a shutdown bug */
#if 0
	if (bug_out)
		BUG();
#endif
}
1285
/*
 * Format @vpd's T10 protocol identifier as human-readable text.  When
 * @p_buf is non-NULL the text is copied there (up to @p_buf_len bytes);
 * otherwise it is printed via printk.
 *
 * NOTE(review): strncpy() does not NUL-terminate when the formatted
 * string is >= p_buf_len; callers presumably pass buffers larger than
 * VPD_TMP_BUF_SIZE — confirm before relying on termination.
 */
void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	/* Codes from the spc3r23 DESIGNATION DESCRIPTOR protocol field */
	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk(KERN_INFO "%s", buf);
}
1338
1339void
1340transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1341{
1342 /*
1343 * Check if the Protocol Identifier Valid (PIV) bit is set..
1344 *
1345 * from spc3r23.pdf section 7.5.1
1346 */
1347 if (page_83[1] & 0x80) {
1348 vpd->protocol_identifier = (page_83[0] & 0xf0);
1349 vpd->protocol_identifier_set = 1;
1350 transport_dump_vpd_proto_id(vpd, NULL, 0);
1351 }
1352}
1353EXPORT_SYMBOL(transport_set_vpd_proto_id);
1354
1355int transport_dump_vpd_assoc(
1356 struct t10_vpd *vpd,
1357 unsigned char *p_buf,
1358 int p_buf_len)
1359{
1360 unsigned char buf[VPD_TMP_BUF_SIZE];
1361 int ret = 0, len;
1362
1363 memset(buf, 0, VPD_TMP_BUF_SIZE);
1364 len = sprintf(buf, "T10 VPD Identifier Association: ");
1365
1366 switch (vpd->association) {
1367 case 0x00:
1368 sprintf(buf+len, "addressed logical unit\n");
1369 break;
1370 case 0x10:
1371 sprintf(buf+len, "target port\n");
1372 break;
1373 case 0x20:
1374 sprintf(buf+len, "SCSI target device\n");
1375 break;
1376 default:
1377 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1378 ret = -1;
1379 break;
1380 }
1381
1382 if (p_buf)
1383 strncpy(p_buf, buf, p_buf_len);
1384 else
1385 printk("%s", buf);
1386
1387 return ret;
1388}
1389
1390int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1391{
1392 /*
1393 * The VPD identification association..
1394 *
1395 * from spc3r23.pdf Section 7.6.3.1 Table 297
1396 */
1397 vpd->association = (page_83[1] & 0x30);
1398 return transport_dump_vpd_assoc(vpd, NULL, 0);
1399}
1400EXPORT_SYMBOL(transport_set_vpd_assoc);
1401
1402int transport_dump_vpd_ident_type(
1403 struct t10_vpd *vpd,
1404 unsigned char *p_buf,
1405 int p_buf_len)
1406{
1407 unsigned char buf[VPD_TMP_BUF_SIZE];
1408 int ret = 0, len;
1409
1410 memset(buf, 0, VPD_TMP_BUF_SIZE);
1411 len = sprintf(buf, "T10 VPD Identifier Type: ");
1412
1413 switch (vpd->device_identifier_type) {
1414 case 0x00:
1415 sprintf(buf+len, "Vendor specific\n");
1416 break;
1417 case 0x01:
1418 sprintf(buf+len, "T10 Vendor ID based\n");
1419 break;
1420 case 0x02:
1421 sprintf(buf+len, "EUI-64 based\n");
1422 break;
1423 case 0x03:
1424 sprintf(buf+len, "NAA\n");
1425 break;
1426 case 0x04:
1427 sprintf(buf+len, "Relative target port identifier\n");
1428 break;
1429 case 0x08:
1430 sprintf(buf+len, "SCSI name string\n");
1431 break;
1432 default:
1433 sprintf(buf+len, "Unsupported: 0x%02x\n",
1434 vpd->device_identifier_type);
1435 ret = -1;
1436 break;
1437 }
1438
1439 if (p_buf)
1440 strncpy(p_buf, buf, p_buf_len);
1441 else
1442 printk("%s", buf);
1443
1444 return ret;
1445}
1446
1447int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1448{
1449 /*
1450 * The VPD identifier type..
1451 *
1452 * from spc3r23.pdf Section 7.6.3.1 Table 298
1453 */
1454 vpd->device_identifier_type = (page_83[1] & 0x0f);
1455 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1456}
1457EXPORT_SYMBOL(transport_set_vpd_ident_type);
1458
1459int transport_dump_vpd_ident(
1460 struct t10_vpd *vpd,
1461 unsigned char *p_buf,
1462 int p_buf_len)
1463{
1464 unsigned char buf[VPD_TMP_BUF_SIZE];
1465 int ret = 0;
1466
1467 memset(buf, 0, VPD_TMP_BUF_SIZE);
1468
1469 switch (vpd->device_identifier_code_set) {
1470 case 0x01: /* Binary */
1471 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1472 &vpd->device_identifier[0]);
1473 break;
1474 case 0x02: /* ASCII */
1475 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1476 &vpd->device_identifier[0]);
1477 break;
1478 case 0x03: /* UTF-8 */
1479 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1480 &vpd->device_identifier[0]);
1481 break;
1482 default:
1483 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1484 " 0x%02x", vpd->device_identifier_code_set);
1485 ret = -1;
1486 break;
1487 }
1488
1489 if (p_buf)
1490 strncpy(p_buf, buf, p_buf_len);
1491 else
1492 printk("%s", buf);
1493
1494 return ret;
1495}
1496
/*
 * Decode the device identifier from an INQUIRY page 0x83 designation
 * descriptor into vpd->device_identifier, honoring the descriptor's
 * code set.  Binary identifiers are stored as lowercase hex text;
 * ASCII/UTF-8 identifiers are copied verbatim.  page_83[3] is the
 * identifier length per the layout referenced below.
 */
int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		/* One hex digit for the type, then two per payload byte */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		/* Text identifiers are copied through unchanged */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		/* Unknown code set: leave device_identifier empty */
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
1533
/*
 * Select SAM Task Attribute handling for @dev: passthrough for
 * TCM/pSCSI backends, emulated for everything else.
 */
static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", TRANSPORT(dev)->name,
		TRANSPORT(dev)->get_device_rev(dev));
}
1553
1554static void scsi_dump_inquiry(struct se_device *dev)
1555{
1556 struct t10_wwn *wwn = DEV_T10_WWN(dev);
1557 int i, device_type;
1558 /*
1559 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1560 */
1561 printk(" Vendor: ");
1562 for (i = 0; i < 8; i++)
1563 if (wwn->vendor[i] >= 0x20)
1564 printk("%c", wwn->vendor[i]);
1565 else
1566 printk(" ");
1567
1568 printk(" Model: ");
1569 for (i = 0; i < 16; i++)
1570 if (wwn->model[i] >= 0x20)
1571 printk("%c", wwn->model[i]);
1572 else
1573 printk(" ");
1574
1575 printk(" Revision: ");
1576 for (i = 0; i < 4; i++)
1577 if (wwn->revision[i] >= 0x20)
1578 printk("%c", wwn->revision[i]);
1579 else
1580 printk(" ");
1581
1582 printk("\n");
1583
1584 device_type = TRANSPORT(dev)->get_device_type(dev);
1585 printk(" Type: %s ", scsi_device_type(device_type));
1586 printk(" ANSI SCSI revision: %02x\n",
1587 TRANSPORT(dev)->get_device_rev(dev));
1588}
1589
1590struct se_device *transport_add_device_to_core_hba(
1591 struct se_hba *hba,
1592 struct se_subsystem_api *transport,
1593 struct se_subsystem_dev *se_dev,
1594 u32 device_flags,
1595 void *transport_dev,
1596 struct se_dev_limits *dev_limits,
1597 const char *inquiry_prod,
1598 const char *inquiry_rev)
1599{
1600 int ret = 0, force_pt;
1601 struct se_device *dev;
1602
1603 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1604 if (!(dev)) {
1605 printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
1606 return NULL;
1607 }
1608 dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
1609 if (!(dev->dev_queue_obj)) {
1610 printk(KERN_ERR "Unable to allocate memory for"
1611 " dev->dev_queue_obj\n");
1612 kfree(dev);
1613 return NULL;
1614 }
1615 transport_init_queue_obj(dev->dev_queue_obj);
1616
1617 dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
1618 GFP_KERNEL);
1619 if (!(dev->dev_status_queue_obj)) {
1620 printk(KERN_ERR "Unable to allocate memory for"
1621 " dev->dev_status_queue_obj\n");
1622 kfree(dev->dev_queue_obj);
1623 kfree(dev);
1624 return NULL;
1625 }
1626 transport_init_queue_obj(dev->dev_status_queue_obj);
1627
1628 dev->dev_flags = device_flags;
1629 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
1630 dev->dev_ptr = (void *) transport_dev;
1631 dev->se_hba = hba;
1632 dev->se_sub_dev = se_dev;
1633 dev->transport = transport;
1634 atomic_set(&dev->active_cmds, 0);
1635 INIT_LIST_HEAD(&dev->dev_list);
1636 INIT_LIST_HEAD(&dev->dev_sep_list);
1637 INIT_LIST_HEAD(&dev->dev_tmr_list);
1638 INIT_LIST_HEAD(&dev->execute_task_list);
1639 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1640 INIT_LIST_HEAD(&dev->ordered_cmd_list);
1641 INIT_LIST_HEAD(&dev->state_task_list);
1642 spin_lock_init(&dev->execute_task_lock);
1643 spin_lock_init(&dev->delayed_cmd_lock);
1644 spin_lock_init(&dev->ordered_cmd_lock);
1645 spin_lock_init(&dev->state_task_lock);
1646 spin_lock_init(&dev->dev_alua_lock);
1647 spin_lock_init(&dev->dev_reservation_lock);
1648 spin_lock_init(&dev->dev_status_lock);
1649 spin_lock_init(&dev->dev_status_thr_lock);
1650 spin_lock_init(&dev->se_port_lock);
1651 spin_lock_init(&dev->se_tmr_lock);
1652
1653 dev->queue_depth = dev_limits->queue_depth;
1654 atomic_set(&dev->depth_left, dev->queue_depth);
1655 atomic_set(&dev->dev_ordered_id, 0);
1656
1657 se_dev_set_default_attribs(dev, dev_limits);
1658
1659 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1660 dev->creation_time = get_jiffies_64();
1661 spin_lock_init(&dev->stats_lock);
1662
1663 spin_lock(&hba->device_lock);
1664 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1665 hba->dev_count++;
1666 spin_unlock(&hba->device_lock);
1667 /*
1668 * Setup the SAM Task Attribute emulation for struct se_device
1669 */
1670 core_setup_task_attr_emulation(dev);
1671 /*
1672 * Force PR and ALUA passthrough emulation with internal object use.
1673 */
1674 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1675 /*
1676 * Setup the Reservations infrastructure for struct se_device
1677 */
1678 core_setup_reservations(dev, force_pt);
1679 /*
1680 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1681 */
1682 if (core_setup_alua(dev, force_pt) < 0)
1683 goto out;
1684
1685 /*
1686 * Startup the struct se_device processing thread
1687 */
1688 dev->process_thread = kthread_run(transport_processing_thread, dev,
1689 "LIO_%s", TRANSPORT(dev)->name);
1690 if (IS_ERR(dev->process_thread)) {
1691 printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
1692 TRANSPORT(dev)->name);
1693 goto out;
1694 }
1695
1696 /*
1697 * Preload the initial INQUIRY const values if we are doing
1698 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1699 * passthrough because this is being provided by the backend LLD.
1700 * This is required so that transport_get_inquiry() copies these
1701 * originals once back into DEV_T10_WWN(dev) for the virtual device
1702 * setup.
1703 */
1704 if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1705 if (!(inquiry_prod) || !(inquiry_prod)) {
1706 printk(KERN_ERR "All non TCM/pSCSI plugins require"
1707 " INQUIRY consts\n");
1708 goto out;
1709 }
1710
1711 strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
1712 strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
1713 strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
1714 }
1715 scsi_dump_inquiry(dev);
1716
1717out:
1718 if (!ret)
1719 return dev;
1720 kthread_stop(dev->process_thread);
1721
1722 spin_lock(&hba->device_lock);
1723 list_del(&dev->dev_list);
1724 hba->dev_count--;
1725 spin_unlock(&hba->device_lock);
1726
1727 se_release_vpd_for_dev(dev);
1728
1729 kfree(dev->dev_status_queue_obj);
1730 kfree(dev->dev_queue_obj);
1731 kfree(dev);
1732
1733 return NULL;
1734}
1735EXPORT_SYMBOL(transport_add_device_to_core_hba);
1736
1737/* transport_generic_prepare_cdb():
1738 *
1739 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1740 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1741 * The point of this is since we are mapping iSCSI LUNs to
1742 * SCSI Target IDs having a non-zero LUN in the CDB will throw the
1743 * devices and HBAs for a loop.
1744 */
1745static inline void transport_generic_prepare_cdb(
1746 unsigned char *cdb)
1747{
1748 switch (cdb[0]) {
1749 case READ_10: /* SBC - RDProtect */
1750 case READ_12: /* SBC - RDProtect */
1751 case READ_16: /* SBC - RDProtect */
1752 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1753 case VERIFY: /* SBC - VRProtect */
1754 case VERIFY_16: /* SBC - VRProtect */
1755 case WRITE_VERIFY: /* SBC - VRProtect */
1756 case WRITE_VERIFY_12: /* SBC - VRProtect */
1757 break;
1758 default:
1759 cdb[1] &= 0x1f; /* clear logical unit number */
1760 break;
1761 }
1762}
1763
/*
 * Allocate a backend struct se_task for @cmd via the subsystem plugin,
 * initialize its list heads and stop completion, assign it the next
 * per-command task number, and link it onto T_TASK(cmd)->t_task_list
 * under t_state_lock.  Returns NULL if the plugin allocation fails.
 */
static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = SE_DEV(cmd);
	unsigned long flags;

	task = dev->transport->alloc_task(cmd);
	if (!task) {
		printk(KERN_ERR "Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_no = T_TASK(cmd)->t_tasks_no++;
	task->task_se_cmd = cmd;
	task->se_dev = dev;
	task->task_data_direction = data_direction;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return task;
}
1793
1794static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1795
/* Point cmd->se_dev at the backing se_device of the command's LUN. */
void transport_device_setup_cmd(struct se_cmd *cmd)
{
	cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
}
EXPORT_SYMBOL(transport_device_setup_cmd);
1801
1802/*
1803 * Used by fabric modules containing a local struct se_cmd within their
1804 * fabric dependent per I/O descriptor.
1805 */
1806void transport_init_se_cmd(
1807 struct se_cmd *cmd,
1808 struct target_core_fabric_ops *tfo,
1809 struct se_session *se_sess,
1810 u32 data_length,
1811 int data_direction,
1812 int task_attr,
1813 unsigned char *sense_buffer)
1814{
1815 INIT_LIST_HEAD(&cmd->se_lun_list);
1816 INIT_LIST_HEAD(&cmd->se_delayed_list);
1817 INIT_LIST_HEAD(&cmd->se_ordered_list);
1818 /*
1819 * Setup t_task pointer to t_task_backstore
1820 */
1821 cmd->t_task = &cmd->t_task_backstore;
1822
1823 INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
1824 init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
1825 init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
1826 init_completion(&T_TASK(cmd)->t_transport_stop_comp);
1827 spin_lock_init(&T_TASK(cmd)->t_state_lock);
1828 atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
1829
1830 cmd->se_tfo = tfo;
1831 cmd->se_sess = se_sess;
1832 cmd->data_length = data_length;
1833 cmd->data_direction = data_direction;
1834 cmd->sam_task_attr = task_attr;
1835 cmd->sense_buffer = sense_buffer;
1836}
1837EXPORT_SYMBOL(transport_init_se_cmd);
1838
/*
 * Validate the SAM task attribute of @cmd when the backing device
 * emulates task attributes, and assign the command the next ordered
 * id from the device.  Returns 0 on success, -1 when the unsupported
 * ACA attribute was requested.
 */
static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == TASK_ATTR_ACA) {
		DEBUG_STA("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -1;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
	smp_mb__after_atomic_inc();
	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			TRANSPORT(cmd->se_dev)->name);
	return 0;
}
1864
1865void transport_free_se_cmd(
1866 struct se_cmd *se_cmd)
1867{
1868 if (se_cmd->se_tmr_req)
1869 core_tmr_release_req(se_cmd->se_tmr_req);
1870 /*
1871 * Check and free any extended CDB buffer that was allocated
1872 */
1873 if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
1874 kfree(T_TASK(se_cmd)->t_task_cdb);
1875}
1876EXPORT_SYMBOL(transport_free_se_cmd);
1877
1878static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
1879
/* transport_generic_allocate_tasks():
 *
 * Called from fabric RX Thread.
 *
 * Validates and copies the received CDB into T_TASK(cmd), runs the
 * command sequencer and SAM task attribute checks, and accounts the
 * PDU against the LUN's port statistics.  Returns 0 on success, a
 * negative value on any validation failure.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	/* Clear the legacy LUN field for opcodes that allow it */
	transport_generic_prepare_cdb(cdb);

	/*
	 * This is needed for early exceptions.
	 */
	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;

	transport_device_setup_cmd(cmd);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return -1;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
		T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!(T_TASK(cmd)->t_task_cdb)) {
			printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
				" %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
			return -1;
		}
	} else
		T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
	/*
	 * Copy the original CDB into T_TASK(cmd).
	 */
	memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -2;
	}
	/* Count this PDU against the LUN's port statistics */
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);
1953
1954/*
1955 * Used by fabric module frontends not defining a TFO->new_cmd_map()
1956 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis
1957 */
1958int transport_generic_handle_cdb(
1959 struct se_cmd *cmd)
1960{
1961 if (!SE_LUN(cmd)) {
1962 dump_stack();
1963 printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
1964 return -1;
1965 }
1966
1967 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
1968 return 0;
1969}
1970EXPORT_SYMBOL(transport_generic_handle_cdb);
1971
1972/*
1973 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1974 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1975 * complete setup in TCM process context w/ TFO->new_cmd_map().
1976 */
1977int transport_generic_handle_cdb_map(
1978 struct se_cmd *cmd)
1979{
1980 if (!SE_LUN(cmd)) {
1981 dump_stack();
1982 printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
1983 return -1;
1984 }
1985
1986 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
1987 return 0;
1988}
1989EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1990
/* transport_generic_handle_data():
 *
 * Queue a command whose DATA OUT phase has arrived for WRITE
 * processing in TCM thread context.  Returns 0 on success (including
 * the already-aborted case), -1 when the submitting kthread has
 * signals pending.
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, then we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure. For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -1;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);
2020
2021/* transport_generic_handle_tmr():
2022 *
2023 *
2024 */
2025int transport_generic_handle_tmr(
2026 struct se_cmd *cmd)
2027{
2028 /*
2029 * This is needed for early exceptions.
2030 */
2031 cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
2032 transport_device_setup_cmd(cmd);
2033
2034 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
2035 return 0;
2036}
2037EXPORT_SYMBOL(transport_generic_handle_tmr);
2038
/*
 * Stop all struct se_task associated with @cmd: tasks neither sent nor
 * active are removed from the execute queue; active tasks are flagged
 * to stop and waited on via task_stop_comp.  Returns the number of
 * tasks that could be neither removed nor stopped (already sent, no
 * longer active).
 */
static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
		CMD_TFO(cmd)->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&T_TASK(cmd)->t_task_list, t_list) {
		DEBUG_TS("task_no[%d] - Processing task %p\n",
				task->task_no, task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!atomic_read(&task->task_sent) &&
		    !atomic_read(&task->task_active)) {
			/* Drop the lock for the removal; reacquire below */
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					task->se_dev);

			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
				task->task_no);
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			continue;
		}

		/*
		 * If the struct se_task is active, sleep until it is returned
		 * from the plugin.
		 */
		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			/* Must not hold t_state_lock while sleeping */
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);

			DEBUG_TS("task_no[%d] - Waiting to complete\n",
				task->task_no);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_TS("task_no[%d] - Stopped successfully\n",
				task->task_no);

			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		} else {
			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
			ret++;
		}

		__transport_stop_task_timer(task, &flags);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return ret;
}
2104
2105static void transport_failure_reset_queue_depth(struct se_device *dev)
2106{
2107 unsigned long flags;
2108
2109 spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);;
2110 atomic_inc(&dev->depth_left);
2111 atomic_inc(&SE_HBA(dev)->left_queue_depth);
2112 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2113}
2114
2115/*
2116 * Handle SAM-esque emulation for generic transport request failures.
2117 */
2118static void transport_generic_request_failure(
2119 struct se_cmd *cmd,
2120 struct se_device *dev,
2121 int complete,
2122 int sc)
2123{
2124 DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
2125 " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
2126 T_TASK(cmd)->t_task_cdb[0]);
2127 DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
2128 " %d/%d transport_error_status: %d\n",
2129 CMD_TFO(cmd)->get_cmd_state(cmd),
2130 cmd->t_state, cmd->deferred_t_state,
2131 cmd->transport_error_status);
2132 DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
2133 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
2134 " t_transport_active: %d t_transport_stop: %d"
2135 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
2136 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
2137 atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
2138 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
2139 atomic_read(&T_TASK(cmd)->t_transport_active),
2140 atomic_read(&T_TASK(cmd)->t_transport_stop),
2141 atomic_read(&T_TASK(cmd)->t_transport_sent));
2142
2143 transport_stop_all_task_timers(cmd);
2144
2145 if (dev)
2146 transport_failure_reset_queue_depth(dev);
2147 /*
2148 * For SAM Task Attribute emulation for failed struct se_cmd
2149 */
2150 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2151 transport_complete_task_attr(cmd);
2152
2153 if (complete) {
2154 transport_direct_request_timeout(cmd);
2155 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
2156 }
2157
2158 switch (cmd->transport_error_status) {
2159 case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
2160 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2161 break;
2162 case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
2163 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
2164 break;
2165 case PYX_TRANSPORT_INVALID_CDB_FIELD:
2166 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2167 break;
2168 case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
2169 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2170 break;
2171 case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
2172 if (!sc)
2173 transport_new_cmd_failure(cmd);
2174 /*
2175 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
2176 * we force this session to fall back to session
2177 * recovery.
2178 */
2179 CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
2180 CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
2181
2182 goto check_stop;
2183 case PYX_TRANSPORT_LU_COMM_FAILURE:
2184 case PYX_TRANSPORT_ILLEGAL_REQUEST:
2185 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2186 break;
2187 case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
2188 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
2189 break;
2190 case PYX_TRANSPORT_WRITE_PROTECTED:
2191 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
2192 break;
2193 case PYX_TRANSPORT_RESERVATION_CONFLICT:
2194 /*
2195 * No SENSE Data payload for this case, set SCSI Status
2196 * and queue the response to $FABRIC_MOD.
2197 *
2198 * Uses linux/include/scsi/scsi.h SAM status codes defs
2199 */
2200 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2201 /*
2202 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2203 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2204 * CONFLICT STATUS.
2205 *
2206 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2207 */
2208 if (SE_SESS(cmd) &&
2209 DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
2210 core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
2211 cmd->orig_fe_lun, 0x2C,
2212 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2213
2214 CMD_TFO(cmd)->queue_status(cmd);
2215 goto check_stop;
2216 case PYX_TRANSPORT_USE_SENSE_REASON:
2217 /*
2218 * struct se_cmd->scsi_sense_reason already set
2219 */
2220 break;
2221 default:
2222 printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
2223 T_TASK(cmd)->t_task_cdb[0],
2224 cmd->transport_error_status);
2225 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2226 break;
2227 }
2228
2229 if (!sc)
2230 transport_new_cmd_failure(cmd);
2231 else
2232 transport_send_check_condition_and_sense(cmd,
2233 cmd->scsi_sense_reason, 0);
2234check_stop:
2235 transport_lun_remove_cmd(cmd);
2236 if (!(transport_cmd_check_stop_to_fabric(cmd)))
2237 ;
2238}
2239
/*
 * Balance out references taken by transport_task_timeout_handler():
 * once all timed-out tasks have been accounted for
 * (t_task_cdbs_timeout_left == 0), subtract the accumulated
 * t_transport_timeout count from t_se_count so the command can be
 * released normally.  No-op when no timeouts fired.
 */
static void transport_direct_request_timeout(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}
	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}

	atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
		   &T_TASK(cmd)->t_se_count);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}
2258
/*
 * Final timeout handling for @cmd: drop all but one of the
 * t_transport_timeout references from t_se_count, then release the
 * descriptor via transport_generic_remove().
 */
static void transport_generic_request_timeout(struct se_cmd *cmd)
{
	unsigned long flags;

	/*
	 * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
	 * to allow last call to free memory resources.
	 */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
		int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);

		atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_generic_remove(cmd, 0, 0);
}
2277
2278static int
2279transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
2280{
2281 unsigned char *buf;
2282
2283 buf = kzalloc(data_length, GFP_KERNEL);
2284 if (!(buf)) {
2285 printk(KERN_ERR "Unable to allocate memory for buffer\n");
2286 return -1;
2287 }
2288
2289 T_TASK(cmd)->t_tasks_se_num = 0;
2290 T_TASK(cmd)->t_task_buf = buf;
2291
2292 return 0;
2293}
2294
2295static inline u32 transport_lba_21(unsigned char *cdb)
2296{
2297 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
2298}
2299
2300static inline u32 transport_lba_32(unsigned char *cdb)
2301{
2302 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
2303}
2304
/* Extract the 64-bit big-endian LBA from a 16-byte CDB (bytes 2-9). */
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned long long lba = 0;
	int i;

	for (i = 2; i <= 9; i++)
		lba = (lba << 8) | cdb[i];
	return lba;
}
2314
2315/*
2316 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
2317 */
2318static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
2319{
2320 unsigned int __v1, __v2;
2321
2322 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
2323 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
2324
2325 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
2326}
2327
/*
 * Mark @se_cmd as carrying a recognized SAM opcode, under t_state_lock.
 */
static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
}
2336
2337/*
2338 * Called from interrupt context.
2339 */
2340static void transport_task_timeout_handler(unsigned long data)
2341{
2342 struct se_task *task = (struct se_task *)data;
2343 struct se_cmd *cmd = TASK_CMD(task);
2344 unsigned long flags;
2345
2346 DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
2347
2348 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2349 if (task->task_flags & TF_STOP) {
2350 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2351 return;
2352 }
2353 task->task_flags &= ~TF_RUNNING;
2354
2355 /*
2356 * Determine if transport_complete_task() has already been called.
2357 */
2358 if (!(atomic_read(&task->task_active))) {
2359 DEBUG_TT("transport task: %p cmd: %p timeout task_active"
2360 " == 0\n", task, cmd);
2361 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2362 return;
2363 }
2364
2365 atomic_inc(&T_TASK(cmd)->t_se_count);
2366 atomic_inc(&T_TASK(cmd)->t_transport_timeout);
2367 T_TASK(cmd)->t_tasks_failed = 1;
2368
2369 atomic_set(&task->task_timeout, 1);
2370 task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
2371 task->task_scsi_status = 1;
2372
2373 if (atomic_read(&task->task_stop)) {
2374 DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
2375 " == 1\n", task, cmd);
2376 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2377 complete(&task->task_stop_comp);
2378 return;
2379 }
2380
2381 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
2382 DEBUG_TT("transport task: %p cmd: %p timeout non zero"
2383 " t_task_cdbs_left\n", task, cmd);
2384 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2385 return;
2386 }
2387 DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
2388 task, cmd);
2389
2390 cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
2391 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2392
2393 transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
2394}
2395
2396/*
2397 * Called with T_TASK(cmd)->t_state_lock held.
2398 */
2399static void transport_start_task_timer(struct se_task *task)
2400{
2401 struct se_device *dev = task->se_dev;
2402 int timeout;
2403
2404 if (task->task_flags & TF_RUNNING)
2405 return;
2406 /*
2407 * If the task_timeout is disabled, exit now.
2408 */
2409 timeout = DEV_ATTRIB(dev)->task_timeout;
2410 if (!(timeout))
2411 return;
2412
2413 init_timer(&task->task_timer);
2414 task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
2415 task->task_timer.data = (unsigned long) task;
2416 task->task_timer.function = transport_task_timeout_handler;
2417
2418 task->task_flags |= TF_RUNNING;
2419 add_timer(&task->task_timer);
2420#if 0
2421 printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
2422 " %d\n", task->task_se_cmd, task, timeout);
2423#endif
2424}
2425
2426/*
2427 * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
2428 */
2429void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
2430{
2431 struct se_cmd *cmd = TASK_CMD(task);
2432
2433 if (!(task->task_flags & TF_RUNNING))
2434 return;
2435
2436 task->task_flags |= TF_STOP;
2437 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
2438
2439 del_timer_sync(&task->task_timer);
2440
2441 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
2442 task->task_flags &= ~TF_RUNNING;
2443 task->task_flags &= ~TF_STOP;
2444}
2445
2446static void transport_stop_all_task_timers(struct se_cmd *cmd)
2447{
2448 struct se_task *task = NULL, *task_tmp;
2449 unsigned long flags;
2450
2451 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2452 list_for_each_entry_safe(task, task_tmp,
2453 &T_TASK(cmd)->t_task_list, t_list)
2454 __transport_stop_task_timer(task, &flags);
2455 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2456}
2457
2458static inline int transport_tcq_window_closed(struct se_device *dev)
2459{
2460 if (dev->dev_tcq_window_closed++ <
2461 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
2462 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
2463 } else
2464 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
2465
2466 wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
2467 return 0;
2468}
2469
2470/*
2471 * Called from Fabric Module context from transport_execute_tasks()
2472 *
2473 * The return of this function determins if the tasks from struct se_cmd
2474 * get added to the execution queue in transport_execute_tasks(),
2475 * or are added to the delayed or ordered lists here.
2476 */
2477static inline int transport_execute_task_attr(struct se_cmd *cmd)
2478{
2479 if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
2480 return 1;
2481 /*
2482 * Check for the existance of HEAD_OF_QUEUE, and if true return 1
2483 * to allow the passed struct se_cmd list of tasks to the front of the list.
2484 */
2485 if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
2486 atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
2487 smp_mb__after_atomic_inc();
2488 DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
2489 " 0x%02x, se_ordered_id: %u\n",
2490 T_TASK(cmd)->t_task_cdb[0],
2491 cmd->se_ordered_id);
2492 return 1;
2493 } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
2494 spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
2495 list_add_tail(&cmd->se_ordered_list,
2496 &SE_DEV(cmd)->ordered_cmd_list);
2497 spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
2498
2499 atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
2500 smp_mb__after_atomic_inc();
2501
2502 DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
2503 " list, se_ordered_id: %u\n",
2504 T_TASK(cmd)->t_task_cdb[0],
2505 cmd->se_ordered_id);
2506 /*
2507 * Add ORDERED command to tail of execution queue if
2508 * no other older commands exist that need to be
2509 * completed first.
2510 */
2511 if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
2512 return 1;
2513 } else {
2514 /*
2515 * For SIMPLE and UNTAGGED Task Attribute commands
2516 */
2517 atomic_inc(&SE_DEV(cmd)->simple_cmds);
2518 smp_mb__after_atomic_inc();
2519 }
2520 /*
2521 * Otherwise if one or more outstanding ORDERED task attribute exist,
2522 * add the dormant task(s) built for the passed struct se_cmd to the
2523 * execution queue and become in Active state for this struct se_device.
2524 */
2525 if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
2526 /*
2527 * Otherwise, add cmd w/ tasks to delayed cmd queue that
2528 * will be drained upon competion of HEAD_OF_QUEUE task.
2529 */
2530 spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
2531 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
2532 list_add_tail(&cmd->se_delayed_list,
2533 &SE_DEV(cmd)->delayed_cmd_list);
2534 spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
2535
2536 DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
2537 " delayed CMD list, se_ordered_id: %u\n",
2538 T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
2539 cmd->se_ordered_id);
2540 /*
2541 * Return zero to let transport_execute_tasks() know
2542 * not to add the delayed tasks to the execution list.
2543 */
2544 return 0;
2545 }
2546 /*
2547 * Otherwise, no ORDERED task attributes exist..
2548 */
2549 return 1;
2550}
2551
2552/*
2553 * Called from fabric module context in transport_generic_new_cmd() and
2554 * transport_generic_process_write()
2555 */
2556static int transport_execute_tasks(struct se_cmd *cmd)
2557{
2558 int add_tasks;
2559
2560 if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
2561 if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
2562 cmd->transport_error_status =
2563 PYX_TRANSPORT_LU_COMM_FAILURE;
2564 transport_generic_request_failure(cmd, NULL, 0, 1);
2565 return 0;
2566 }
2567 }
2568 /*
2569 * Call transport_cmd_check_stop() to see if a fabric exception
2570 * has occured that prevents execution.
2571 */
2572 if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
2573 /*
2574 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
2575 * attribute for the tasks of the received struct se_cmd CDB
2576 */
2577 add_tasks = transport_execute_task_attr(cmd);
2578 if (add_tasks == 0)
2579 goto execute_tasks;
2580 /*
2581 * This calls transport_add_tasks_from_cmd() to handle
2582 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
2583 * (if enabled) in __transport_add_task_to_execute_queue() and
2584 * transport_add_task_check_sam_attr().
2585 */
2586 transport_add_tasks_from_cmd(cmd);
2587 }
2588 /*
2589 * Kick the execution queue for the cmd associated struct se_device
2590 * storage object.
2591 */
2592execute_tasks:
2593 __transport_execute_tasks(SE_DEV(cmd));
2594 return 0;
2595}
2596
2597/*
2598 * Called to check struct se_device tcq depth window, and once open pull struct se_task
2599 * from struct se_device->execute_task_list and
2600 *
2601 * Called from transport_processing_thread()
2602 */
2603static int __transport_execute_tasks(struct se_device *dev)
2604{
2605 int error;
2606 struct se_cmd *cmd = NULL;
2607 struct se_task *task;
2608 unsigned long flags;
2609
2610 /*
2611 * Check if there is enough room in the device and HBA queue to send
2612 * struct se_transport_task's to the selected transport.
2613 */
2614check_depth:
2615 spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
2616 if (!(atomic_read(&dev->depth_left)) ||
2617 !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
2618 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2619 return transport_tcq_window_closed(dev);
2620 }
2621 dev->dev_tcq_window_closed = 0;
2622
2623 spin_lock(&dev->execute_task_lock);
2624 task = transport_get_task_from_execute_queue(dev);
2625 spin_unlock(&dev->execute_task_lock);
2626
2627 if (!task) {
2628 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2629 return 0;
2630 }
2631
2632 atomic_dec(&dev->depth_left);
2633 atomic_dec(&SE_HBA(dev)->left_queue_depth);
2634 spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
2635
2636 cmd = TASK_CMD(task);
2637
2638 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2639 atomic_set(&task->task_active, 1);
2640 atomic_set(&task->task_sent, 1);
2641 atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
2642
2643 if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
2644 T_TASK(cmd)->t_task_cdbs)
2645 atomic_set(&cmd->transport_sent, 1);
2646
2647 transport_start_task_timer(task);
2648 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2649 /*
2650 * The struct se_cmd->transport_emulate_cdb() function pointer is used
2651 * to grab REPORT_LUNS CDBs before they hit the
2652 * struct se_subsystem_api->do_task() caller below.
2653 */
2654 if (cmd->transport_emulate_cdb) {
2655 error = cmd->transport_emulate_cdb(cmd);
2656 if (error != 0) {
2657 cmd->transport_error_status = error;
2658 atomic_set(&task->task_active, 0);
2659 atomic_set(&cmd->transport_sent, 0);
2660 transport_stop_tasks_for_cmd(cmd);
2661 transport_generic_request_failure(cmd, dev, 0, 1);
2662 goto check_depth;
2663 }
2664 /*
2665 * Handle the successful completion for transport_emulate_cdb()
2666 * for synchronous operation, following SCF_EMULATE_CDB_ASYNC
2667 * Otherwise the caller is expected to complete the task with
2668 * proper status.
2669 */
2670 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
2671 cmd->scsi_status = SAM_STAT_GOOD;
2672 task->task_scsi_status = GOOD;
2673 transport_complete_task(task, 1);
2674 }
2675 } else {
2676 /*
2677 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
2678 * RAMDISK we use the internal transport_emulate_control_cdb() logic
2679 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
2680 * LUN emulation code.
2681 *
2682 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
2683 * call ->do_task() directly and let the underlying TCM subsystem plugin
2684 * code handle the CDB emulation.
2685 */
2686 if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
2687 (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
2688 error = transport_emulate_control_cdb(task);
2689 else
2690 error = TRANSPORT(dev)->do_task(task);
2691
2692 if (error != 0) {
2693 cmd->transport_error_status = error;
2694 atomic_set(&task->task_active, 0);
2695 atomic_set(&cmd->transport_sent, 0);
2696 transport_stop_tasks_for_cmd(cmd);
2697 transport_generic_request_failure(cmd, dev, 0, 1);
2698 }
2699 }
2700
2701 goto check_depth;
2702
2703 return 0;
2704}
2705
2706void transport_new_cmd_failure(struct se_cmd *se_cmd)
2707{
2708 unsigned long flags;
2709 /*
2710 * Any unsolicited data will get dumped for failed command inside of
2711 * the fabric plugin
2712 */
2713 spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
2714 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2715 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2716 spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
2717
2718 CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
2719}
2720
2721static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
2722
2723static inline u32 transport_get_sectors_6(
2724 unsigned char *cdb,
2725 struct se_cmd *cmd,
2726 int *ret)
2727{
2728 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2729
2730 /*
2731 * Assume TYPE_DISK for non struct se_device objects.
2732 * Use 8-bit sector value.
2733 */
2734 if (!dev)
2735 goto type_disk;
2736
2737 /*
2738 * Use 24-bit allocation length for TYPE_TAPE.
2739 */
2740 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
2741 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
2742
2743 /*
2744 * Everything else assume TYPE_DISK Sector CDB location.
2745 * Use 8-bit sector value.
2746 */
2747type_disk:
2748 return (u32)cdb[4];
2749}
2750
2751static inline u32 transport_get_sectors_10(
2752 unsigned char *cdb,
2753 struct se_cmd *cmd,
2754 int *ret)
2755{
2756 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2757
2758 /*
2759 * Assume TYPE_DISK for non struct se_device objects.
2760 * Use 16-bit sector value.
2761 */
2762 if (!dev)
2763 goto type_disk;
2764
2765 /*
2766 * XXX_10 is not defined in SSC, throw an exception
2767 */
2768 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2769 *ret = -1;
2770 return 0;
2771 }
2772
2773 /*
2774 * Everything else assume TYPE_DISK Sector CDB location.
2775 * Use 16-bit sector value.
2776 */
2777type_disk:
2778 return (u32)(cdb[7] << 8) + cdb[8];
2779}
2780
2781static inline u32 transport_get_sectors_12(
2782 unsigned char *cdb,
2783 struct se_cmd *cmd,
2784 int *ret)
2785{
2786 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2787
2788 /*
2789 * Assume TYPE_DISK for non struct se_device objects.
2790 * Use 32-bit sector value.
2791 */
2792 if (!dev)
2793 goto type_disk;
2794
2795 /*
2796 * XXX_12 is not defined in SSC, throw an exception
2797 */
2798 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2799 *ret = -1;
2800 return 0;
2801 }
2802
2803 /*
2804 * Everything else assume TYPE_DISK Sector CDB location.
2805 * Use 32-bit sector value.
2806 */
2807type_disk:
2808 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
2809}
2810
2811static inline u32 transport_get_sectors_16(
2812 unsigned char *cdb,
2813 struct se_cmd *cmd,
2814 int *ret)
2815{
2816 struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
2817
2818 /*
2819 * Assume TYPE_DISK for non struct se_device objects.
2820 * Use 32-bit sector value.
2821 */
2822 if (!dev)
2823 goto type_disk;
2824
2825 /*
2826 * Use 24-bit allocation length for TYPE_TAPE.
2827 */
2828 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
2829 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
2830
2831type_disk:
2832 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
2833 (cdb[12] << 8) + cdb[13];
2834}
2835
2836/*
2837 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
2838 */
2839static inline u32 transport_get_sectors_32(
2840 unsigned char *cdb,
2841 struct se_cmd *cmd,
2842 int *ret)
2843{
2844 /*
2845 * Assume TYPE_DISK for non struct se_device objects.
2846 * Use 32-bit sector value.
2847 */
2848 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
2849 (cdb[30] << 8) + cdb[31];
2850
2851}
2852
2853static inline u32 transport_get_size(
2854 u32 sectors,
2855 unsigned char *cdb,
2856 struct se_cmd *cmd)
2857{
2858 struct se_device *dev = SE_DEV(cmd);
2859
2860 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
2861 if (cdb[1] & 1) { /* sectors */
2862 return DEV_ATTRIB(dev)->block_size * sectors;
2863 } else /* bytes */
2864 return sectors;
2865 }
2866#if 0
2867 printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
2868 " %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
2869 DEV_ATTRIB(dev)->block_size * sectors,
2870 TRANSPORT(dev)->name);
2871#endif
2872 return DEV_ATTRIB(dev)->block_size * sectors;
2873}
2874
/*
 * Convert a two-character ASCII hex string (e.g. "3f", "A0") into its
 * one-byte binary value.  Accepts lower case, upper case and decimal
 * digits; any other character is treated as if it were a decimal digit
 * (matching the historical behaviour of this helper).
 */
unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
{
	unsigned char hi, lo;

	if ((val[0] >= 'a') && (val[0] <= 'f'))
		hi = val[0] - 'a' + 10;
	else if ((val[0] >= 'A') && (val[0] <= 'F'))
		hi = val[0] - 'A' + 10;
	else	/* digit */
		hi = val[0] - '0';

	if ((val[1] >= 'a') && (val[1] <= 'f'))
		lo = val[1] - 'a' + 10;
	else if ((val[1] >= 'A') && (val[1] <= 'F'))
		lo = val[1] - 'A' + 10;
	else	/* digit */
		lo = val[1] - '0';

	return ((hi & 0xf) << 4) | (lo & 0xf);
}
2901EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
2902
/*
 * transport_complete_callback for the XDWRITEREAD emulation: XOR the
 * data-out buffer (T_TASK(cmd)->t_mem_list) into the BIDI read buffer
 * (T_TASK(cmd)->t_mem_bidi_list) in place.
 */
static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct se_mem *se_mem;
	unsigned int offset;
	int i;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 * the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 * blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!(buf)) {
		printk(KERN_ERR "Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
	 * into the locally allocated *buf
	 */
	transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * T_TASK(cmd)->t_mem_bidi_list
	 */

	offset = 0;
	list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
		/* NOTE(review): kmap_atomic() does not return NULL in
		 * practice, so this check looks redundant -- confirm
		 * before removing. */
		if (!(addr))
			goto out;

		for (i = 0; i < se_mem->se_len; i++)
			*(addr + se_mem->se_off + i) ^= *(buf + offset + i);

		offset += se_mem->se_len;
		kunmap_atomic(addr, KM_USER0);
	}
out:
	kfree(buf);
}
2950
2951/*
2952 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
2953 */
2954static int transport_get_sense_data(struct se_cmd *cmd)
2955{
2956 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
2957 struct se_device *dev;
2958 struct se_task *task = NULL, *task_tmp;
2959 unsigned long flags;
2960 u32 offset = 0;
2961
2962 if (!SE_LUN(cmd)) {
2963 printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
2964 return -1;
2965 }
2966 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
2967 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
2968 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2969 return 0;
2970 }
2971
2972 list_for_each_entry_safe(task, task_tmp,
2973 &T_TASK(cmd)->t_task_list, t_list) {
2974
2975 if (!task->task_sense)
2976 continue;
2977
2978 dev = task->se_dev;
2979 if (!(dev))
2980 continue;
2981
2982 if (!TRANSPORT(dev)->get_sense_buffer) {
2983 printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
2984 " is NULL\n");
2985 continue;
2986 }
2987
2988 sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
2989 if (!(sense_buffer)) {
2990 printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
2991 " sense buffer for task with sense\n",
2992 CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
2993 continue;
2994 }
2995 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
2996
2997 offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
2998 TRANSPORT_SENSE_BUFFER);
2999
3000 memcpy((void *)&buffer[offset], (void *)sense_buffer,
3001 TRANSPORT_SENSE_BUFFER);
3002 cmd->scsi_status = task->task_scsi_status;
3003 /* Automatically padded */
3004 cmd->scsi_sense_length =
3005 (TRANSPORT_SENSE_BUFFER + offset);
3006
3007 printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
3008 " and sense\n",
3009 dev->se_hba->hba_id, TRANSPORT(dev)->name,
3010 cmd->scsi_status);
3011 return 0;
3012 }
3013 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
3014
3015 return -1;
3016}
3017
3018static int transport_allocate_resources(struct se_cmd *cmd)
3019{
3020 u32 length = cmd->data_length;
3021
3022 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3023 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
3024 return transport_generic_get_mem(cmd, length, PAGE_SIZE);
3025 else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
3026 return transport_generic_allocate_buf(cmd, length);
3027 else
3028 return 0;
3029}
3030
/*
 * Flag @cmd with RESERVATION CONFLICT status after a failed SPC-2/SPC-3
 * reservation check, optionally recording a UNIT ATTENTION for UA
 * interlock mode.  Always returns -2, the sequencer's reservation
 * conflict error code.
 */
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
	/*
	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
	 * CONFLICT STATUS.
	 *
	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
	 */
	if (SE_SESS(cmd) &&
	    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
		core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
			cmd->orig_fe_lun, 0x2C,
			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
	return -2;
}
3052
3053/* transport_generic_cmd_sequencer():
3054 *
3055 * Generic Command Sequencer that should work for most DAS transport
3056 * drivers.
3057 *
3058 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
3059 * RX Thread.
3060 *
3061 * FIXME: Need to support other SCSI OPCODES where as well.
3062 */
3063static int transport_generic_cmd_sequencer(
3064 struct se_cmd *cmd,
3065 unsigned char *cdb)
3066{
3067 struct se_device *dev = SE_DEV(cmd);
3068 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
3069 int ret = 0, sector_ret = 0, passthrough;
3070 u32 sectors = 0, size = 0, pr_reg_type = 0;
3071 u16 service_action;
3072 u8 alua_ascq = 0;
3073 /*
3074 * Check for an existing UNIT ATTENTION condition
3075 */
3076 if (core_scsi3_ua_check(cmd, cdb) < 0) {
3077 cmd->transport_wait_for_tasks =
3078 &transport_nop_wait_for_tasks;
3079 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3080 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
3081 return -2;
3082 }
3083 /*
3084 * Check status of Asymmetric Logical Unit Assignment port
3085 */
3086 ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
3087 if (ret != 0) {
3088 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3089 /*
3090 * Set SCSI additional sense code (ASC) to 'LUN Not Accessable';
3091 * The ALUA additional sense code qualifier (ASCQ) is determined
3092 * by the ALUA primary or secondary access state..
3093 */
3094 if (ret > 0) {
3095#if 0
3096 printk(KERN_INFO "[%s]: ALUA TG Port not available,"
3097 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
3098 CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
3099#endif
3100 transport_set_sense_codes(cmd, 0x04, alua_ascq);
3101 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3102 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
3103 return -2;
3104 }
3105 goto out_invalid_cdb_field;
3106 }
3107 /*
3108 * Check status for SPC-3 Persistent Reservations
3109 */
3110 if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
3111 if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
3112 cmd, cdb, pr_reg_type) != 0)
3113 return transport_handle_reservation_conflict(cmd);
3114 /*
3115 * This means the CDB is allowed for the SCSI Initiator port
3116 * when said port is *NOT* holding the legacy SPC-2 or
3117 * SPC-3 Persistent Reservation.
3118 */
3119 }
3120
3121 switch (cdb[0]) {
3122 case READ_6:
3123 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3124 if (sector_ret)
3125 goto out_unsupported_cdb;
3126 size = transport_get_size(sectors, cdb, cmd);
3127 cmd->transport_split_cdb = &split_cdb_XX_6;
3128 T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
3129 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3130 break;
3131 case READ_10:
3132 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3133 if (sector_ret)
3134 goto out_unsupported_cdb;
3135 size = transport_get_size(sectors, cdb, cmd);
3136 cmd->transport_split_cdb = &split_cdb_XX_10;
3137 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3138 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3139 break;
3140 case READ_12:
3141 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3142 if (sector_ret)
3143 goto out_unsupported_cdb;
3144 size = transport_get_size(sectors, cdb, cmd);
3145 cmd->transport_split_cdb = &split_cdb_XX_12;
3146 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3147 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3148 break;
3149 case READ_16:
3150 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3151 if (sector_ret)
3152 goto out_unsupported_cdb;
3153 size = transport_get_size(sectors, cdb, cmd);
3154 cmd->transport_split_cdb = &split_cdb_XX_16;
3155 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3156 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3157 break;
3158 case WRITE_6:
3159 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
3160 if (sector_ret)
3161 goto out_unsupported_cdb;
3162 size = transport_get_size(sectors, cdb, cmd);
3163 cmd->transport_split_cdb = &split_cdb_XX_6;
3164 T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
3165 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3166 break;
3167 case WRITE_10:
3168 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3169 if (sector_ret)
3170 goto out_unsupported_cdb;
3171 size = transport_get_size(sectors, cdb, cmd);
3172 cmd->transport_split_cdb = &split_cdb_XX_10;
3173 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3174 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3175 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3176 break;
3177 case WRITE_12:
3178 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
3179 if (sector_ret)
3180 goto out_unsupported_cdb;
3181 size = transport_get_size(sectors, cdb, cmd);
3182 cmd->transport_split_cdb = &split_cdb_XX_12;
3183 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3184 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3185 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3186 break;
3187 case WRITE_16:
3188 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3189 if (sector_ret)
3190 goto out_unsupported_cdb;
3191 size = transport_get_size(sectors, cdb, cmd);
3192 cmd->transport_split_cdb = &split_cdb_XX_16;
3193 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3194 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3195 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3196 break;
3197 case XDWRITEREAD_10:
3198 if ((cmd->data_direction != DMA_TO_DEVICE) ||
3199 !(T_TASK(cmd)->t_tasks_bidi))
3200 goto out_invalid_cdb_field;
3201 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3202 if (sector_ret)
3203 goto out_unsupported_cdb;
3204 size = transport_get_size(sectors, cdb, cmd);
3205 cmd->transport_split_cdb = &split_cdb_XX_10;
3206 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3207 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3208 passthrough = (TRANSPORT(dev)->transport_type ==
3209 TRANSPORT_PLUGIN_PHBA_PDEV);
3210 /*
3211 * Skip the remaining assignments for TCM/PSCSI passthrough
3212 */
3213 if (passthrough)
3214 break;
3215 /*
3216 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
3217 */
3218 cmd->transport_complete_callback = &transport_xor_callback;
3219 T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
3220 break;
3221 case VARIABLE_LENGTH_CMD:
3222 service_action = get_unaligned_be16(&cdb[8]);
3223 /*
3224 * Determine if this is TCM/PSCSI device and we should disable
3225 * internal emulation for this CDB.
3226 */
3227 passthrough = (TRANSPORT(dev)->transport_type ==
3228 TRANSPORT_PLUGIN_PHBA_PDEV);
3229
3230 switch (service_action) {
3231 case XDWRITEREAD_32:
3232 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3233 if (sector_ret)
3234 goto out_unsupported_cdb;
3235 size = transport_get_size(sectors, cdb, cmd);
3236 /*
3237 * Use WRITE_32 and READ_32 opcodes for the emulated
3238 * XDWRITE_READ_32 logic.
3239 */
3240 cmd->transport_split_cdb = &split_cdb_XX_32;
3241 T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
3242 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
3243
3244 /*
3245 * Skip the remaining assignments for TCM/PSCSI passthrough
3246 */
3247 if (passthrough)
3248 break;
3249
3250 /*
3251 * Setup BIDI XOR callback to be run during
3252 * transport_generic_complete_ok()
3253 */
3254 cmd->transport_complete_callback = &transport_xor_callback;
3255 T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
3256 break;
3257 case WRITE_SAME_32:
3258 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
3259 if (sector_ret)
3260 goto out_unsupported_cdb;
3261 size = transport_get_size(sectors, cdb, cmd);
3262 T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
3263 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3264
3265 /*
3266 * Skip the remaining assignments for TCM/PSCSI passthrough
3267 */
3268 if (passthrough)
3269 break;
3270
3271 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3272 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3273 " bits not supported for Block Discard"
3274 " Emulation\n");
3275 goto out_invalid_cdb_field;
3276 }
3277 /*
3278 * Currently for the emulated case we only accept
3279 * tpws with the UNMAP=1 bit set.
3280 */
3281 if (!(cdb[10] & 0x08)) {
3282 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
3283 " supported for Block Discard Emulation\n");
3284 goto out_invalid_cdb_field;
3285 }
3286 break;
3287 default:
3288 printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
3289 " 0x%04x not supported\n", service_action);
3290 goto out_unsupported_cdb;
3291 }
3292 break;
3293 case 0xa3:
3294 if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
3295 /* MAINTENANCE_IN from SCC-2 */
3296 /*
3297 * Check for emulated MI_REPORT_TARGET_PGS.
3298 */
3299 if (cdb[1] == MI_REPORT_TARGET_PGS) {
3300 cmd->transport_emulate_cdb =
3301 (T10_ALUA(su_dev)->alua_type ==
3302 SPC3_ALUA_EMULATED) ?
3303 &core_emulate_report_target_port_groups :
3304 NULL;
3305 }
3306 size = (cdb[6] << 24) | (cdb[7] << 16) |
3307 (cdb[8] << 8) | cdb[9];
3308 } else {
3309 /* GPCMD_SEND_KEY from multi media commands */
3310 size = (cdb[8] << 8) + cdb[9];
3311 }
3312 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3313 break;
3314 case MODE_SELECT:
3315 size = cdb[4];
3316 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3317 break;
3318 case MODE_SELECT_10:
3319 size = (cdb[7] << 8) + cdb[8];
3320 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3321 break;
3322 case MODE_SENSE:
3323 size = cdb[4];
3324 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3325 break;
3326 case MODE_SENSE_10:
3327 case GPCMD_READ_BUFFER_CAPACITY:
3328 case GPCMD_SEND_OPC:
3329 case LOG_SELECT:
3330 case LOG_SENSE:
3331 size = (cdb[7] << 8) + cdb[8];
3332 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3333 break;
3334 case READ_BLOCK_LIMITS:
3335 size = READ_BLOCK_LEN;
3336 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3337 break;
3338 case GPCMD_GET_CONFIGURATION:
3339 case GPCMD_READ_FORMAT_CAPACITIES:
3340 case GPCMD_READ_DISC_INFO:
3341 case GPCMD_READ_TRACK_RZONE_INFO:
3342 size = (cdb[7] << 8) + cdb[8];
3343 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3344 break;
3345 case PERSISTENT_RESERVE_IN:
3346 case PERSISTENT_RESERVE_OUT:
3347 cmd->transport_emulate_cdb =
3348 (T10_RES(su_dev)->res_type ==
3349 SPC3_PERSISTENT_RESERVATIONS) ?
3350 &core_scsi3_emulate_pr : NULL;
3351 size = (cdb[7] << 8) + cdb[8];
3352 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3353 break;
3354 case GPCMD_MECHANISM_STATUS:
3355 case GPCMD_READ_DVD_STRUCTURE:
3356 size = (cdb[8] << 8) + cdb[9];
3357 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3358 break;
3359 case READ_POSITION:
3360 size = READ_POSITION_LEN;
3361 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3362 break;
3363 case 0xa4:
3364 if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
3365 /* MAINTENANCE_OUT from SCC-2
3366 *
3367 * Check for emulated MO_SET_TARGET_PGS.
3368 */
3369 if (cdb[1] == MO_SET_TARGET_PGS) {
3370 cmd->transport_emulate_cdb =
3371 (T10_ALUA(su_dev)->alua_type ==
3372 SPC3_ALUA_EMULATED) ?
3373 &core_emulate_set_target_port_groups :
3374 NULL;
3375 }
3376
3377 size = (cdb[6] << 24) | (cdb[7] << 16) |
3378 (cdb[8] << 8) | cdb[9];
3379 } else {
3380 /* GPCMD_REPORT_KEY from multi media commands */
3381 size = (cdb[8] << 8) + cdb[9];
3382 }
3383 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3384 break;
3385 case INQUIRY:
3386 size = (cdb[3] << 8) + cdb[4];
3387 /*
3388 * Do implict HEAD_OF_QUEUE processing for INQUIRY.
3389 * See spc4r17 section 5.3
3390 */
3391 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3392 cmd->sam_task_attr = TASK_ATTR_HOQ;
3393 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3394 break;
3395 case READ_BUFFER:
3396 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3397 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3398 break;
3399 case READ_CAPACITY:
3400 size = READ_CAP_LEN;
3401 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3402 break;
3403 case READ_MEDIA_SERIAL_NUMBER:
3404 case SECURITY_PROTOCOL_IN:
3405 case SECURITY_PROTOCOL_OUT:
3406 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3407 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3408 break;
3409 case SERVICE_ACTION_IN:
3410 case ACCESS_CONTROL_IN:
3411 case ACCESS_CONTROL_OUT:
3412 case EXTENDED_COPY:
3413 case READ_ATTRIBUTE:
3414 case RECEIVE_COPY_RESULTS:
3415 case WRITE_ATTRIBUTE:
3416 size = (cdb[10] << 24) | (cdb[11] << 16) |
3417 (cdb[12] << 8) | cdb[13];
3418 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3419 break;
3420 case RECEIVE_DIAGNOSTIC:
3421 case SEND_DIAGNOSTIC:
3422 size = (cdb[3] << 8) | cdb[4];
3423 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3424 break;
3425/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
3426#if 0
3427 case GPCMD_READ_CD:
3428 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3429 size = (2336 * sectors);
3430 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3431 break;
3432#endif
3433 case READ_TOC:
3434 size = cdb[8];
3435 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3436 break;
3437 case REQUEST_SENSE:
3438 size = cdb[4];
3439 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3440 break;
3441 case READ_ELEMENT_STATUS:
3442 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
3443 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3444 break;
3445 case WRITE_BUFFER:
3446 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
3447 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3448 break;
3449 case RESERVE:
3450 case RESERVE_10:
3451 /*
3452 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
3453 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3454 */
3455 if (cdb[0] == RESERVE_10)
3456 size = (cdb[7] << 8) | cdb[8];
3457 else
3458 size = cmd->data_length;
3459
3460 /*
3461 * Setup the legacy emulated handler for SPC-2 and
3462 * >= SPC-3 compatible reservation handling (CRH=1)
3463 * Otherwise, we assume the underlying SCSI logic is
3464 * is running in SPC_PASSTHROUGH, and wants reservations
3465 * emulation disabled.
3466 */
3467 cmd->transport_emulate_cdb =
3468 (T10_RES(su_dev)->res_type !=
3469 SPC_PASSTHROUGH) ?
3470 &core_scsi2_emulate_crh : NULL;
3471 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3472 break;
3473 case RELEASE:
3474 case RELEASE_10:
3475 /*
3476 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
3477 * Assume the passthrough or $FABRIC_MOD will tell us about it.
3478 */
3479 if (cdb[0] == RELEASE_10)
3480 size = (cdb[7] << 8) | cdb[8];
3481 else
3482 size = cmd->data_length;
3483
3484 cmd->transport_emulate_cdb =
3485 (T10_RES(su_dev)->res_type !=
3486 SPC_PASSTHROUGH) ?
3487 &core_scsi2_emulate_crh : NULL;
3488 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3489 break;
3490 case SYNCHRONIZE_CACHE:
3491 case 0x91: /* SYNCHRONIZE_CACHE_16: */
3492 /*
3493 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
3494 */
3495 if (cdb[0] == SYNCHRONIZE_CACHE) {
3496 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3497 T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
3498 } else {
3499 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3500 T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
3501 }
3502 if (sector_ret)
3503 goto out_unsupported_cdb;
3504
3505 size = transport_get_size(sectors, cdb, cmd);
3506 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3507
3508 /*
3509 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
3510 */
3511 if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
3512 break;
3513 /*
3514 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
3515 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
3516 */
3517 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3518 /*
3519 * Check to ensure that LBA + Range does not exceed past end of
3520 * device.
3521 */
3522 if (transport_get_sectors(cmd) < 0)
3523 goto out_invalid_cdb_field;
3524 break;
3525 case UNMAP:
3526 size = get_unaligned_be16(&cdb[7]);
3527 passthrough = (TRANSPORT(dev)->transport_type ==
3528 TRANSPORT_PLUGIN_PHBA_PDEV);
3529 /*
3530 * Determine if the received UNMAP used to for direct passthrough
3531 * into Linux/SCSI with struct request via TCM/pSCSI or we are
3532 * signaling the use of internal transport_generic_unmap() emulation
3533 * for UNMAP -> Linux/BLOCK disbard with TCM/IBLOCK and TCM/FILEIO
3534 * subsystem plugin backstores.
3535 */
3536 if (!(passthrough))
3537 cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
3538
3539 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3540 break;
3541 case WRITE_SAME_16:
3542 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
3543 if (sector_ret)
3544 goto out_unsupported_cdb;
3545 size = transport_get_size(sectors, cdb, cmd);
3546 T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]);
3547 passthrough = (TRANSPORT(dev)->transport_type ==
3548 TRANSPORT_PLUGIN_PHBA_PDEV);
3549 /*
3550 * Determine if the received WRITE_SAME_16 is used to for direct
3551 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
3552 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
3553 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and
3554 * TCM/FILEIO subsystem plugin backstores.
3555 */
3556 if (!(passthrough)) {
3557 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
3558 printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
3559 " bits not supported for Block Discard"
3560 " Emulation\n");
3561 goto out_invalid_cdb_field;
3562 }
3563 /*
3564 * Currently for the emulated case we only accept
3565 * tpws with the UNMAP=1 bit set.
3566 */
3567 if (!(cdb[1] & 0x08)) {
3568 printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
3569 " supported for Block Discard Emulation\n");
3570 goto out_invalid_cdb_field;
3571 }
3572 }
3573 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3574 break;
3575 case ALLOW_MEDIUM_REMOVAL:
3576 case GPCMD_CLOSE_TRACK:
3577 case ERASE:
3578 case INITIALIZE_ELEMENT_STATUS:
3579 case GPCMD_LOAD_UNLOAD:
3580 case REZERO_UNIT:
3581 case SEEK_10:
3582 case GPCMD_SET_SPEED:
3583 case SPACE:
3584 case START_STOP:
3585 case TEST_UNIT_READY:
3586 case VERIFY:
3587 case WRITE_FILEMARKS:
3588 case MOVE_MEDIUM:
3589 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
3590 break;
3591 case REPORT_LUNS:
3592 cmd->transport_emulate_cdb =
3593 &transport_core_report_lun_response;
3594 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
3595 /*
3596 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
3597 * See spc4r17 section 5.3
3598 */
3599 if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3600 cmd->sam_task_attr = TASK_ATTR_HOQ;
3601 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
3602 break;
3603 default:
3604 printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
3605 " 0x%02x, sending CHECK_CONDITION.\n",
3606 CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
3607 cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
3608 goto out_unsupported_cdb;
3609 }
3610
3611 if (size != cmd->data_length) {
3612 printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
3613 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3614 " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
3615 cmd->data_length, size, cdb[0]);
3616
3617 cmd->cmd_spdtl = size;
3618
3619 if (cmd->data_direction == DMA_TO_DEVICE) {
3620 printk(KERN_ERR "Rejecting underflow/overflow"
3621 " WRITE data\n");
3622 goto out_invalid_cdb_field;
3623 }
3624 /*
3625 * Reject READ_* or WRITE_* with overflow/underflow for
3626 * type SCF_SCSI_DATA_SG_IO_CDB.
3627 */
3628 if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
3629 printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
3630 " CDB on non 512-byte sector setup subsystem"
3631 " plugin: %s\n", TRANSPORT(dev)->name);
3632 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3633 goto out_invalid_cdb_field;
3634 }
3635
3636 if (size > cmd->data_length) {
3637 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3638 cmd->residual_count = (size - cmd->data_length);
3639 } else {
3640 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3641 cmd->residual_count = (cmd->data_length - size);
3642 }
3643 cmd->data_length = size;
3644 }
3645
3646 transport_set_supported_SAM_opcode(cmd);
3647 return ret;
3648
3649out_unsupported_cdb:
3650 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3651 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3652 return -2;
3653out_invalid_cdb_field:
3654 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3655 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3656 return -2;
3657}
3658
3659static inline void transport_release_tasks(struct se_cmd *);
3660
3661/*
3662 * This function will copy a contiguous *src buffer into a destination
3663 * struct scatterlist array.
3664 */
3665static void transport_memcpy_write_contig(
3666 struct se_cmd *cmd,
3667 struct scatterlist *sg_d,
3668 unsigned char *src)
3669{
3670 u32 i = 0, length = 0, total_length = cmd->data_length;
3671 void *dst;
3672
3673 while (total_length) {
3674 length = sg_d[i].length;
3675
3676 if (length > total_length)
3677 length = total_length;
3678
3679 dst = sg_virt(&sg_d[i]);
3680
3681 memcpy(dst, src, length);
3682
3683 if (!(total_length -= length))
3684 return;
3685
3686 src += length;
3687 i++;
3688 }
3689}
3690
3691/*
3692 * This function will copy a struct scatterlist array *sg_s into a destination
3693 * contiguous *dst buffer.
3694 */
3695static void transport_memcpy_read_contig(
3696 struct se_cmd *cmd,
3697 unsigned char *dst,
3698 struct scatterlist *sg_s)
3699{
3700 u32 i = 0, length = 0, total_length = cmd->data_length;
3701 void *src;
3702
3703 while (total_length) {
3704 length = sg_s[i].length;
3705
3706 if (length > total_length)
3707 length = total_length;
3708
3709 src = sg_virt(&sg_s[i]);
3710
3711 memcpy(dst, src, length);
3712
3713 if (!(total_length -= length))
3714 return;
3715
3716 dst += length;
3717 i++;
3718 }
3719}
3720
3721static void transport_memcpy_se_mem_read_contig(
3722 struct se_cmd *cmd,
3723 unsigned char *dst,
3724 struct list_head *se_mem_list)
3725{
3726 struct se_mem *se_mem;
3727 void *src;
3728 u32 length = 0, total_length = cmd->data_length;
3729
3730 list_for_each_entry(se_mem, se_mem_list, se_list) {
3731 length = se_mem->se_len;
3732
3733 if (length > total_length)
3734 length = total_length;
3735
3736 src = page_address(se_mem->se_page) + se_mem->se_off;
3737
3738 memcpy(dst, src, length);
3739
3740 if (!(total_length -= length))
3741 return;
3742
3743 dst += length;
3744 }
3745}
3746
3747/*
3748 * Called from transport_generic_complete_ok() and
3749 * transport_generic_request_failure() to determine which dormant/delayed
3750 * and ordered cmds need to have their tasks added to the execution queue.
3751 */
3752static void transport_complete_task_attr(struct se_cmd *cmd)
3753{
3754 struct se_device *dev = SE_DEV(cmd);
3755 struct se_cmd *cmd_p, *cmd_tmp;
3756 int new_active_tasks = 0;
3757
3758 if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
3759 atomic_dec(&dev->simple_cmds);
3760 smp_mb__after_atomic_dec();
3761 dev->dev_cur_ordered_id++;
3762 DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
3763 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3764 cmd->se_ordered_id);
3765 } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
3766 atomic_dec(&dev->dev_hoq_count);
3767 smp_mb__after_atomic_dec();
3768 dev->dev_cur_ordered_id++;
3769 DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
3770 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3771 cmd->se_ordered_id);
3772 } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
3773 spin_lock(&dev->ordered_cmd_lock);
3774 list_del(&cmd->se_ordered_list);
3775 atomic_dec(&dev->dev_ordered_sync);
3776 smp_mb__after_atomic_dec();
3777 spin_unlock(&dev->ordered_cmd_lock);
3778
3779 dev->dev_cur_ordered_id++;
3780 DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
3781 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3782 }
3783 /*
3784 * Process all commands up to the last received
3785 * ORDERED task attribute which requires another blocking
3786 * boundary
3787 */
3788 spin_lock(&dev->delayed_cmd_lock);
3789 list_for_each_entry_safe(cmd_p, cmd_tmp,
3790 &dev->delayed_cmd_list, se_delayed_list) {
3791
3792 list_del(&cmd_p->se_delayed_list);
3793 spin_unlock(&dev->delayed_cmd_lock);
3794
3795 DEBUG_STA("Calling add_tasks() for"
3796 " cmd_p: 0x%02x Task Attr: 0x%02x"
3797 " Dormant -> Active, se_ordered_id: %u\n",
3798 T_TASK(cmd_p)->t_task_cdb[0],
3799 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3800
3801 transport_add_tasks_from_cmd(cmd_p);
3802 new_active_tasks++;
3803
3804 spin_lock(&dev->delayed_cmd_lock);
3805 if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
3806 break;
3807 }
3808 spin_unlock(&dev->delayed_cmd_lock);
3809 /*
3810 * If new tasks have become active, wake up the transport thread
3811 * to do the processing of the Active tasks.
3812 */
3813 if (new_active_tasks != 0)
3814 wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
3815}
3816
/*
 * Successful completion path for a struct se_cmd: perform task attribute
 * accounting, retrieve sense data if required, run any post-processing
 * callback, queue data and/or status back to the fabric module, and
 * finally release the command from its LUN.
 */
static void transport_generic_complete_ok(struct se_cmd *cmd)
{
	int reason = 0;
	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;

		/*
		 * Only set when an struct se_task->task_scsi_status returned
		 * a non GOOD status.
		 */
		if (cmd->scsi_status) {
			transport_send_check_condition_and_sense(
					cmd, reason, 1);
			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used by amoungst other things
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		/* Account READ payload bytes in the LUN port statistics */
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (SE_LUN(cmd)->lun_sep) {
			SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * If enabled by TCM fabirc module pre-registered SGL
		 * memory, perform the memcpy() from the TCM internal
		 * contigious buffer back to the original SGL.
		 */
		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
			transport_memcpy_write_contig(cmd,
				 T_TASK(cmd)->t_task_pt_sgl,
				 T_TASK(cmd)->t_task_buf);

		CMD_TFO(cmd)->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		/* Account WRITE payload bytes in the LUN port statistics */
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (SE_LUN(cmd)->lun_sep) {
			SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (SE_LUN(cmd)->lun_sep) {
				SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			CMD_TFO(cmd)->queue_data_in(cmd);
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		/* No data payload to return; just queue SCSI status */
		CMD_TFO(cmd)->queue_status(cmd);
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}
3905
/*
 * Free all inactive struct se_task descriptors attached to *cmd,
 * releasing their scatterlists and returning each task to its owning
 * subsystem plugin via TRANSPORT(se_dev)->free_task().  Tasks with
 * task_active still set are skipped.
 */
static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&T_TASK(cmd)->t_task_list, t_list) {
		/* Leave tasks that are still executing alone */
		if (atomic_read(&task->task_active))
			continue;

		kfree(task->task_sg_bidi);
		kfree(task->task_sg);

		list_del(&task->t_list);

		/*
		 * Drop t_state_lock across the call into the subsystem
		 * plugin's free_task(), then retake it for the next entry.
		 */
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		if (task->se_dev)
			TRANSPORT(task->se_dev)->free_task(task);
		else
			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
				task->task_no);
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}
3932
/*
 * Release the data buffer memory held by *cmd: either the contiguous
 * t_task_buf kernel buffer, or the struct se_mem lists.  Backing pages
 * are only freed when TCM allocated them itself -- not when the fabric
 * passed in pre-mapped SGL memory, and not when the subsystem plugin
 * performs its own se_mem mapping.
 */
static inline void transport_free_pages(struct se_cmd *cmd)
{
	struct se_mem *se_mem, *se_mem_tmp;
	int free_page = 1;

	/* Pages not owned by TCM in these two cases; free descriptors only */
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		free_page = 0;
	if (cmd->se_dev->transport->do_se_mem_map)
		free_page = 0;

	/* Contiguous buffer case: a single kfree() and we are done */
	if (T_TASK(cmd)->t_task_buf) {
		kfree(T_TASK(cmd)->t_task_buf);
		T_TASK(cmd)->t_task_buf = NULL;
		return;
	}

	/*
	 * Caller will handle releasing of struct se_mem.
	 */
	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
		return;

	if (!(T_TASK(cmd)->t_tasks_se_num))
		return;

	list_for_each_entry_safe(se_mem, se_mem_tmp,
			T_TASK(cmd)->t_mem_list, se_list) {
		/*
		 * Only call __free_page(se_mem->se_page) when
		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
		 */
		if (free_page)
			__free_page(se_mem->se_page);

		list_del(&se_mem->se_list);
		kmem_cache_free(se_mem_cache, se_mem);
	}

	/* Repeat for the BIDI READ list when present */
	if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
		list_for_each_entry_safe(se_mem, se_mem_tmp,
				T_TASK(cmd)->t_mem_bidi_list, se_list) {
			/*
			 * Only call __free_page(se_mem->se_page) when
			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
			 */
			if (free_page)
				__free_page(se_mem->se_page);

			list_del(&se_mem->se_list);
			kmem_cache_free(se_mem_cache, se_mem);
		}
	}

	kfree(T_TASK(cmd)->t_mem_bidi_list);
	T_TASK(cmd)->t_mem_bidi_list = NULL;
	kfree(T_TASK(cmd)->t_mem_list);
	T_TASK(cmd)->t_mem_list = NULL;
	T_TASK(cmd)->t_tasks_se_num = 0;
}
3992
/* Release all struct se_task descriptors associated with *cmd. */
static inline void transport_release_tasks(struct se_cmd *cmd)
{
	transport_free_dev_tasks(cmd);
}
3997
3998static inline int transport_dec_and_check(struct se_cmd *cmd)
3999{
4000 unsigned long flags;
4001
4002 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
4003 if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
4004 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
4005 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4006 flags);
4007 return 1;
4008 }
4009 }
4010
4011 if (atomic_read(&T_TASK(cmd)->t_se_count)) {
4012 if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
4013 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
4014 flags);
4015 return 1;
4016 }
4017 }
4018 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
4019
4020 return 0;
4021}
4022
/*
 * Frontend reference-drop release path for a struct se_cmd.  If other
 * references remain (transport_dec_and_check() returns nonzero) the
 * command stays alive; otherwise clear its per-device task state,
 * release tasks and data pages, and hand the descriptor back to the
 * fabric module via release_cmd_direct().
 */
static void transport_release_fe_cmd(struct se_cmd *cmd)
{
	unsigned long flags;

	if (transport_dec_and_check(cmd))
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/* Skip device-state teardown if it was already cleared */
	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		goto free_pages;
	}
	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_release_tasks(cmd);
free_pages:
	transport_free_pages(cmd);
	transport_free_se_cmd(cmd);
	CMD_TFO(cmd)->release_cmd_direct(cmd);
}
4045
/*
 * Generic final teardown for a struct se_cmd.  Drops the frontend and
 * backend reference counts and, when this was the last reference,
 * releases device task state, tasks and data pages before returning the
 * descriptor either to the fabric pool (release_to_pool) or directly to
 * the fabric module.
 *
 * Returns 1 when outstanding references remain and teardown was
 * deferred, 0 when the command was fully released.
 */
static int transport_generic_remove(
	struct se_cmd *cmd,
	int release_to_pool,
	int session_reinstatement)
{
	unsigned long flags;

	/* Commands without an allocated t_task go straight to release */
	if (!(T_TASK(cmd)))
		goto release_cmd;

	if (transport_dec_and_check(cmd)) {
		/*
		 * References remain; for session reinstatement still
		 * remove the per-device task state before deferring.
		 */
		if (session_reinstatement) {
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			transport_all_task_dev_remove_state(cmd);
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);
		}
		return 1;
	}

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/* Skip device-state teardown if it was already cleared */
	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		goto free_pages;
	}
	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_release_tasks(cmd);
free_pages:
	transport_free_pages(cmd);

release_cmd:
	if (release_to_pool) {
		transport_release_cmd_to_pool(cmd);
	} else {
		transport_free_se_cmd(cmd);
		CMD_TFO(cmd)->release_cmd_direct(cmd);
	}

	return 0;
}
4089
4090/*
4091 * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
4092 * @cmd: Associated se_cmd descriptor
4093 * @mem: SGL style memory for TCM WRITE / READ
4094 * @sg_mem_num: Number of SGL elements
4095 * @mem_bidi_in: SGL style memory for TCM BIDI READ
4096 * @sg_mem_bidi_num: Number of BIDI READ SGL elements
4097 *
4098 * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage
4099 * of parameters.
4100 */
4101int transport_generic_map_mem_to_cmd(
4102 struct se_cmd *cmd,
4103 struct scatterlist *mem,
4104 u32 sg_mem_num,
4105 struct scatterlist *mem_bidi_in,
4106 u32 sg_mem_bidi_num)
4107{
4108 u32 se_mem_cnt_out = 0;
4109 int ret;
4110
4111 if (!(mem) || !(sg_mem_num))
4112 return 0;
4113 /*
4114 * Passed *mem will contain a list_head containing preformatted
4115 * struct se_mem elements...
4116 */
4117 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
4118 if ((mem_bidi_in) || (sg_mem_bidi_num)) {
4119 printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
4120 " with BIDI-COMMAND\n");
4121 return -ENOSYS;
4122 }
4123
4124 T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
4125 T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
4126 cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
4127 return 0;
4128 }
4129 /*
4130 * Otherwise, assume the caller is passing a struct scatterlist
4131 * array from include/linux/scatterlist.h
4132 */
4133 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
4134 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
4135 /*
4136 * For CDB using TCM struct se_mem linked list scatterlist memory
4137 * processed into a TCM struct se_subsystem_dev, we do the mapping
4138 * from the passed physical memory to struct se_mem->se_page here.
4139 */
4140 T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
4141 if (!(T_TASK(cmd)->t_mem_list))
4142 return -ENOMEM;
4143
4144 ret = transport_map_sg_to_mem(cmd,
4145 T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
4146 if (ret < 0)
4147 return -ENOMEM;
4148
4149 T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
4150 /*
4151 * Setup BIDI READ list of struct se_mem elements
4152 */
4153 if ((mem_bidi_in) && (sg_mem_bidi_num)) {
4154 T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
4155 if (!(T_TASK(cmd)->t_mem_bidi_list)) {
4156 kfree(T_TASK(cmd)->t_mem_list);
4157 return -ENOMEM;
4158 }
4159 se_mem_cnt_out = 0;
4160
4161 ret = transport_map_sg_to_mem(cmd,
4162 T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
4163 &se_mem_cnt_out);
4164 if (ret < 0) {
4165 kfree(T_TASK(cmd)->t_mem_list);
4166 return -ENOMEM;
4167 }
4168
4169 T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
4170 }
4171 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
4172
4173 } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
4174 if (mem_bidi_in || sg_mem_bidi_num) {
4175 printk(KERN_ERR "BIDI-Commands not supported using "
4176 "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
4177 return -ENOSYS;
4178 }
4179 /*
4180 * For incoming CDBs using a contiguous buffer internall with TCM,
4181 * save the passed struct scatterlist memory. After TCM storage object
4182 * processing has completed for this struct se_cmd, TCM core will call
4183 * transport_memcpy_[write,read]_contig() as necessary from
4184 * transport_generic_complete_ok() and transport_write_pending() in order
4185 * to copy the TCM buffer to/from the original passed *mem in SGL ->
4186 * struct scatterlist format.
4187 */
4188 cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
4189 T_TASK(cmd)->t_task_pt_sgl = mem;
4190 }
4191
4192 return 0;
4193}
4194EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
4195
4196
4197static inline long long transport_dev_end_lba(struct se_device *dev)
4198{
4199 return dev->transport->get_blocks(dev) + 1;
4200}
4201
/*
 * Compute T_TASK(cmd)->t_tasks_sectors from cmd->data_length and the
 * backend block size (minimum one sector), and for TYPE_DISK devices
 * verify that LBA + sectors does not run past the end of the device.
 * Returns 0 on success, or PYX_TRANSPORT_REQ_TOO_MANY_SECTORS after
 * setting up the TCM_SECTOR_COUNT_TOO_MANY check condition.
 */
static int transport_get_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);

	T_TASK(cmd)->t_tasks_sectors =
		(cmd->data_length / DEV_ATTRIB(dev)->block_size);
	/* Always transfer at least one sector */
	if (!(T_TASK(cmd)->t_tasks_sectors))
		T_TASK(cmd)->t_tasks_sectors = 1;

	/* End-of-device range checking only applies to TYPE_DISK */
	if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
		return 0;

	if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
	     transport_dev_end_lba(dev)) {
		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
			transport_dev_end_lba(dev));
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
		return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
	}

	return 0;
}
4227
/*
 * Allocate the struct se_task descriptors needed to execute *cmd,
 * including a separate BIDI READ task list when
 * T_TASK(cmd)->t_mem_bidi_list is present, then initialize the per-cmd
 * outstanding-task counters.  Returns 0 on success, or
 * PYX_TRANSPORT_LU_COMM_FAILURE after setting up a check condition.
 */
static int transport_new_cmd_obj(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	u32 task_cdbs = 0, rc;

	/* Non DATA_SG_IO CDBs are carried by a single task */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
		task_cdbs++;
		T_TASK(cmd)->t_task_cdbs++;
	} else {
		int set_counts = 1;

		/*
		 * Setup any BIDI READ tasks and memory from
		 * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
		 * are queued first for the non pSCSI passthrough case.
		 */
		if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
		    (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
			rc = transport_generic_get_cdb_count(cmd,
				T_TASK(cmd)->t_task_lba,
				T_TASK(cmd)->t_tasks_sectors,
				DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
				set_counts);
			if (!(rc)) {
				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
				cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
				return PYX_TRANSPORT_LU_COMM_FAILURE;
			}
			/* Counters already set by the BIDI pass above */
			set_counts = 0;
		}
		/*
		 * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
		 * Note for BIDI transfers this will contain the WRITE payload
		 */
		task_cdbs = transport_generic_get_cdb_count(cmd,
			T_TASK(cmd)->t_task_lba,
			T_TASK(cmd)->t_tasks_sectors,
			cmd->data_direction, T_TASK(cmd)->t_mem_list,
			set_counts);
		if (!(task_cdbs)) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return PYX_TRANSPORT_LU_COMM_FAILURE;
		}
		T_TASK(cmd)->t_task_cdbs += task_cdbs;

		/*
		 * NOTE(review): the disabled printk below references an
		 * undeclared 'obj_ptr' and would not compile if re-enabled.
		 */
#if 0
		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
			" %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
			T_TASK(cmd)->t_task_cdbs);
#endif
	}

	atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
	atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
	atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
	return 0;
}
4289
4290static struct list_head *transport_init_se_mem_list(void)
4291{
4292 struct list_head *se_mem_list;
4293
4294 se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
4295 if (!(se_mem_list)) {
4296 printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
4297 return NULL;
4298 }
4299 INIT_LIST_HEAD(se_mem_list);
4300
4301 return se_mem_list;
4302}
4303
4304static int
4305transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
4306{
4307 unsigned char *buf;
4308 struct se_mem *se_mem;
4309
4310 T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
4311 if (!(T_TASK(cmd)->t_mem_list))
4312 return -ENOMEM;
4313
4314 /*
4315 * If the device uses memory mapping this is enough.
4316 */
4317 if (cmd->se_dev->transport->do_se_mem_map)
4318 return 0;
4319
4320 /*
4321 * Setup BIDI-COMMAND READ list of struct se_mem elements
4322 */
4323 if (T_TASK(cmd)->t_tasks_bidi) {
4324 T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
4325 if (!(T_TASK(cmd)->t_mem_bidi_list)) {
4326 kfree(T_TASK(cmd)->t_mem_list);
4327 return -ENOMEM;
4328 }
4329 }
4330
4331 while (length) {
4332 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4333 if (!(se_mem)) {
4334 printk(KERN_ERR "Unable to allocate struct se_mem\n");
4335 goto out;
4336 }
4337 INIT_LIST_HEAD(&se_mem->se_list);
4338 se_mem->se_len = (length > dma_size) ? dma_size : length;
4339
4340/* #warning FIXME Allocate contigous pages for struct se_mem elements */
4341 se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
4342 if (!(se_mem->se_page)) {
4343 printk(KERN_ERR "alloc_pages() failed\n");
4344 goto out;
4345 }
4346
4347 buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
4348 if (!(buf)) {
4349 printk(KERN_ERR "kmap_atomic() failed\n");
4350 goto out;
4351 }
4352 memset(buf, 0, se_mem->se_len);
4353 kunmap_atomic(buf, KM_IRQ0);
4354
4355 list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
4356 T_TASK(cmd)->t_tasks_se_num++;
4357
4358 DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
4359 " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
4360 se_mem->se_off);
4361
4362 length -= se_mem->se_len;
4363 }
4364
4365 DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
4366 T_TASK(cmd)->t_tasks_se_num);
4367
4368 return 0;
4369out:
4370 return -1;
4371}
4372
4373extern u32 transport_calc_sg_num(
4374 struct se_task *task,
4375 struct se_mem *in_se_mem,
4376 u32 task_offset)
4377{
4378 struct se_cmd *se_cmd = task->task_se_cmd;
4379 struct se_device *se_dev = SE_DEV(se_cmd);
4380 struct se_mem *se_mem = in_se_mem;
4381 struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
4382 u32 sg_length, task_size = task->task_size, task_sg_num_padded;
4383
4384 while (task_size != 0) {
4385 DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
4386 " se_mem->se_off(%u) task_offset(%u)\n",
4387 se_mem->se_page, se_mem->se_len,
4388 se_mem->se_off, task_offset);
4389
4390 if (task_offset == 0) {
4391 if (task_size >= se_mem->se_len) {
4392 sg_length = se_mem->se_len;
4393
4394 if (!(list_is_last(&se_mem->se_list,
4395 T_TASK(se_cmd)->t_mem_list)))
4396 se_mem = list_entry(se_mem->se_list.next,
4397 struct se_mem, se_list);
4398 } else {
4399 sg_length = task_size;
4400 task_size -= sg_length;
4401 goto next;
4402 }
4403
4404 DEBUG_SC("sg_length(%u) task_size(%u)\n",
4405 sg_length, task_size);
4406 } else {
4407 if ((se_mem->se_len - task_offset) > task_size) {
4408 sg_length = task_size;
4409 task_size -= sg_length;
4410 goto next;
4411 } else {
4412 sg_length = (se_mem->se_len - task_offset);
4413
4414 if (!(list_is_last(&se_mem->se_list,
4415 T_TASK(se_cmd)->t_mem_list)))
4416 se_mem = list_entry(se_mem->se_list.next,
4417 struct se_mem, se_list);
4418 }
4419
4420 DEBUG_SC("sg_length(%u) task_size(%u)\n",
4421 sg_length, task_size);
4422
4423 task_offset = 0;
4424 }
4425 task_size -= sg_length;
4426next:
4427 DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
4428 task->task_no, task_size);
4429
4430 task->task_sg_num++;
4431 }
4432 /*
4433 * Check if the fabric module driver is requesting that all
4434 * struct se_task->task_sg[] be chained together.. If so,
4435 * then allocate an extra padding SG entry for linking and
4436 * marking the end of the chained SGL.
4437 */
4438 if (tfo->task_sg_chaining) {
4439 task_sg_num_padded = (task->task_sg_num + 1);
4440 task->task_padded_sg = 1;
4441 } else
4442 task_sg_num_padded = task->task_sg_num;
4443
4444 task->task_sg = kzalloc(task_sg_num_padded *
4445 sizeof(struct scatterlist), GFP_KERNEL);
4446 if (!(task->task_sg)) {
4447 printk(KERN_ERR "Unable to allocate memory for"
4448 " task->task_sg\n");
4449 return 0;
4450 }
4451 sg_init_table(&task->task_sg[0], task_sg_num_padded);
4452 /*
4453 * Setup task->task_sg_bidi for SCSI READ payload for
4454 * TCM/pSCSI passthrough if present for BIDI-COMMAND
4455 */
4456 if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
4457 (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
4458 task->task_sg_bidi = kzalloc(task_sg_num_padded *
4459 sizeof(struct scatterlist), GFP_KERNEL);
4460 if (!(task->task_sg_bidi)) {
4461 printk(KERN_ERR "Unable to allocate memory for"
4462 " task->task_sg_bidi\n");
4463 return 0;
4464 }
4465 sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
4466 }
4467 /*
4468 * For the chaining case, setup the proper end of SGL for the
4469 * initial submission struct task into struct se_subsystem_api.
4470 * This will be cleared later by transport_do_task_sg_chain()
4471 */
4472 if (task->task_padded_sg) {
4473 sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
4474 /*
4475 * Added the 'if' check before marking end of bi-directional
4476 * scatterlist (which gets created only in case of request
4477 * (RD + WR).
4478 */
4479 if (task->task_sg_bidi)
4480 sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
4481 }
4482
4483 DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
4484 " task_sg_num_padded(%u)\n", task->task_sg_num,
4485 task_sg_num_padded);
4486
4487 return task->task_sg_num;
4488}
4489
4490static inline int transport_set_tasks_sectors_disk(
4491 struct se_task *task,
4492 struct se_device *dev,
4493 unsigned long long lba,
4494 u32 sectors,
4495 int *max_sectors_set)
4496{
4497 if ((lba + sectors) > transport_dev_end_lba(dev)) {
4498 task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
4499
4500 if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
4501 task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4502 *max_sectors_set = 1;
4503 }
4504 } else {
4505 if (sectors > DEV_ATTRIB(dev)->max_sectors) {
4506 task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4507 *max_sectors_set = 1;
4508 } else
4509 task->task_sectors = sectors;
4510 }
4511
4512 return 0;
4513}
4514
4515static inline int transport_set_tasks_sectors_non_disk(
4516 struct se_task *task,
4517 struct se_device *dev,
4518 unsigned long long lba,
4519 u32 sectors,
4520 int *max_sectors_set)
4521{
4522 if (sectors > DEV_ATTRIB(dev)->max_sectors) {
4523 task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
4524 *max_sectors_set = 1;
4525 } else
4526 task->task_sectors = sectors;
4527
4528 return 0;
4529}
4530
4531static inline int transport_set_tasks_sectors(
4532 struct se_task *task,
4533 struct se_device *dev,
4534 unsigned long long lba,
4535 u32 sectors,
4536 int *max_sectors_set)
4537{
4538 return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
4539 transport_set_tasks_sectors_disk(task, dev, lba, sectors,
4540 max_sectors_set) :
4541 transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
4542 max_sectors_set);
4543}
4544
4545static int transport_map_sg_to_mem(
4546 struct se_cmd *cmd,
4547 struct list_head *se_mem_list,
4548 void *in_mem,
4549 u32 *se_mem_cnt)
4550{
4551 struct se_mem *se_mem;
4552 struct scatterlist *sg;
4553 u32 sg_count = 1, cmd_size = cmd->data_length;
4554
4555 if (!in_mem) {
4556 printk(KERN_ERR "No source scatterlist\n");
4557 return -1;
4558 }
4559 sg = (struct scatterlist *)in_mem;
4560
4561 while (cmd_size) {
4562 se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
4563 if (!(se_mem)) {
4564 printk(KERN_ERR "Unable to allocate struct se_mem\n");
4565 return -1;
4566 }
4567 INIT_LIST_HEAD(&se_mem->se_list);
4568 DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
4569 " sg_page: %p offset: %d length: %d\n", cmd_size,
4570 sg_page(sg), sg->offset, sg->length);
4571
4572 se_mem->se_page = sg_page(sg);
4573 se_mem->se_off = sg->offset;
4574
4575 if (cmd_size > sg->length) {
4576 se_mem->se_len = sg->length;
4577 sg = sg_next(sg);
4578 sg_count++;
4579 } else
4580 se_mem->se_len = cmd_size;
4581
4582 cmd_size -= se_mem->se_len;
4583
4584 DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
4585 *se_mem_cnt, cmd_size);
4586 DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
4587 se_mem->se_page, se_mem->se_off, se_mem->se_len);
4588
4589 list_add_tail(&se_mem->se_list, se_mem_list);
4590 (*se_mem_cnt)++;
4591 }
4592
4593 DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
4594 " struct se_mem\n", sg_count, *se_mem_cnt);
4595
4596 if (sg_count != *se_mem_cnt)
4597 BUG();
4598
4599 return 0;
4600}
4601
/* transport_map_mem_to_sg():
 *
 * Fill a task's scatterlist (passed opaquely via in_mem) from the
 * command's struct se_mem list, starting at the cursor position given
 * by in_se_mem/*task_offset, until task->task_size bytes are mapped.
 * On return, *out_se_mem, *se_mem_cnt and *task_offset describe the
 * resume position for the next task's mapping pass.
 *
 * Returns 0 on success, -1 if in_mem is NULL.
 */
int transport_map_mem_to_sg(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct se_cmd *se_cmd = task->task_se_cmd;
	struct se_mem *se_mem = in_se_mem;
	struct scatterlist *sg = (struct scatterlist *)in_mem;
	u32 task_size = task->task_size, sg_no = 0;

	if (!sg) {
		printk(KERN_ERR "Unable to locate valid struct"
			" scatterlist pointer\n");
		return -1;
	}

	while (task_size != 0) {
		/*
		 * Setup the contigious array of scatterlists for
		 * this struct se_task.
		 */
		sg_assign_page(sg, se_mem->se_page);

		if (*task_offset == 0) {
			/* Fresh se_mem: SG starts at the se_mem's own offset */
			sg->offset = se_mem->se_off;

			if (task_size >= se_mem->se_len) {
				/* se_mem fully consumed; advance the cursor */
				sg->length = se_mem->se_len;

				if (!(list_is_last(&se_mem->se_list,
						T_TASK(se_cmd)->t_mem_list))) {
					se_mem = list_entry(se_mem->se_list.next,
							struct se_mem, se_list);
					(*se_mem_cnt)++;
				}
			} else {
				sg->length = task_size;
				/*
				 * Determine if we need to calculate an offset
				 * into the struct se_mem on the next go around..
				 */
				task_size -= sg->length;
				if (!(task_size))
					*task_offset = sg->length;

				goto next;
			}

		} else {
			/* Resuming mid-se_mem from a previous task's mapping */
			sg->offset = (*task_offset + se_mem->se_off);

			if ((se_mem->se_len - *task_offset) > task_size) {
				sg->length = task_size;
				/*
				 * Determine if we need to calculate an offset
				 * into the struct se_mem on the next go around..
				 */
				task_size -= sg->length;
				if (!(task_size))
					*task_offset += sg->length;

				goto next;
			} else {
				sg->length = (se_mem->se_len - *task_offset);

				if (!(list_is_last(&se_mem->se_list,
						T_TASK(se_cmd)->t_mem_list))) {
					se_mem = list_entry(se_mem->se_list.next,
							struct se_mem, se_list);
					(*se_mem_cnt)++;
				}
			}

			/* Offset only applies to the first se_mem consumed */
			*task_offset = 0;
		}
		task_size -= sg->length;
next:
		DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
			" task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
			sg_page(sg), sg->length, sg->offset, task_size, *task_offset);

		sg_no++;
		if (!(task_size))
			break;

		sg = sg_next(sg);

		/* Sanity: remaining bytes can never exceed the total payload */
		if (task_size > se_cmd->data_length)
			BUG();
	}
	/* Export the cursor so the caller can map the next task from here */
	*out_se_mem = se_mem;

	DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
		" SGs\n", task->task_no, *se_mem_cnt, sg_no);

	return 0;
}
4707
4708/*
4709 * This function can be used by HW target mode drivers to create a linked
4710 * scatterlist from all contiguously allocated struct se_task->task_sg[].
4711 * This is intended to be called during the completion path by TCM Core
4712 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
4713 */
4714void transport_do_task_sg_chain(struct se_cmd *cmd)
4715{
4716 struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
4717 struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
4718 struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
4719 struct se_task *task;
4720 struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
4721 u32 task_sg_num = 0, sg_count = 0;
4722 int i;
4723
4724 if (tfo->task_sg_chaining == 0) {
4725 printk(KERN_ERR "task_sg_chaining is diabled for fabric module:"
4726 " %s\n", tfo->get_fabric_name());
4727 dump_stack();
4728 return;
4729 }
4730 /*
4731 * Walk the struct se_task list and setup scatterlist chains
4732 * for each contiguosly allocated struct se_task->task_sg[].
4733 */
4734 list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
4735 if (!(task->task_sg) || !(task->task_padded_sg))
4736 continue;
4737
4738 if (sg_head && sg_link) {
4739 sg_head_cur = &task->task_sg[0];
4740 sg_link_cur = &task->task_sg[task->task_sg_num];
4741 /*
4742 * Either add chain or mark end of scatterlist
4743 */
4744 if (!(list_is_last(&task->t_list,
4745 &T_TASK(cmd)->t_task_list))) {
4746 /*
4747 * Clear existing SGL termination bit set in
4748 * transport_calc_sg_num(), see sg_mark_end()
4749 */
4750 sg_end_cur = &task->task_sg[task->task_sg_num - 1];
4751 sg_end_cur->page_link &= ~0x02;
4752
4753 sg_chain(sg_head, task_sg_num, sg_head_cur);
4754 sg_count += (task->task_sg_num + 1);
4755 } else
4756 sg_count += task->task_sg_num;
4757
4758 sg_head = sg_head_cur;
4759 sg_link = sg_link_cur;
4760 task_sg_num = task->task_sg_num;
4761 continue;
4762 }
4763 sg_head = sg_first = &task->task_sg[0];
4764 sg_link = &task->task_sg[task->task_sg_num];
4765 task_sg_num = task->task_sg_num;
4766 /*
4767 * Check for single task..
4768 */
4769 if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
4770 /*
4771 * Clear existing SGL termination bit set in
4772 * transport_calc_sg_num(), see sg_mark_end()
4773 */
4774 sg_end = &task->task_sg[task->task_sg_num - 1];
4775 sg_end->page_link &= ~0x02;
4776 sg_count += (task->task_sg_num + 1);
4777 } else
4778 sg_count += task->task_sg_num;
4779 }
4780 /*
4781 * Setup the starting pointer and total t_tasks_sg_linked_no including
4782 * padding SGs for linking and to mark the end.
4783 */
4784 T_TASK(cmd)->t_tasks_sg_chained = sg_first;
4785 T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
4786
4787 DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
4788 " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
4789 T_TASK(cmd)->t_tasks_sg_chained_no);
4790
4791 for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
4792 T_TASK(cmd)->t_tasks_sg_chained_no, i) {
4793
4794 DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
4795 sg, sg_page(sg), sg->length, sg->offset);
4796 if (sg_is_chain(sg))
4797 DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
4798 if (sg_is_last(sg))
4799 DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
4800 }
4801
4802}
4803EXPORT_SYMBOL(transport_do_task_sg_chain);
4804
/*
 * Map a command's se_mem list onto a task's scatterlist, dispatching to
 * the backend's own ->do_se_mem_map() when the transport plugin did its
 * own allocation, or to the generic transport_calc_sg_num() +
 * transport_map_mem_to_sg() path otherwise.  Returns 0 on success,
 * negative on failure.
 */
static int transport_do_se_mem_map(
	struct se_device *dev,
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	u32 task_offset = *task_offset_in;
	int ret = 0;
	/*
	 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
	 * has been done by the transport plugin.
	 */
	if (TRANSPORT(dev)->do_se_mem_map) {
		ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
				task_offset_in);
		if (ret == 0)
			/* Account the mapped segments on the owning command */
			T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;

		return ret;
	}
	/*
	 * This is the normal path for all normal non BIDI and BIDI-COMMAND
	 * WRITE payloads.. If we need to do BIDI READ passthrough for
	 * TCM/pSCSI the first call to transport_do_se_mem_map ->
	 * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
	 * allocation for task->task_sg_bidi, and the subsequent call to
	 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
	 */
	if (!(task->task_sg_bidi)) {
		/*
		 * Assume default that transport plugin speaks preallocated
		 * scatterlists.
		 */
		if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
			return -1;
		/*
		 * struct se_task->task_sg now contains the struct scatterlist array.
		 */
		return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
				in_se_mem, out_se_mem, se_mem_cnt,
				task_offset_in);
	}
	/*
	 * Handle the se_mem_list -> struct task->task_sg_bidi
	 * memory map for the extra BIDI READ payload
	 */
	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
			in_se_mem, out_se_mem, se_mem_cnt,
			task_offset_in);
}
4860
/*
 * Split a data-phase command into one or more struct se_task descriptors,
 * each covering at most max_sectors, and map each task's payload from
 * mem_list (and the BIDI list, if any) onto its scatterlist(s).
 *
 * Returns the number of tasks (CDBs) created, or 0 on failure.
 */
static u32 transport_generic_get_cdb_count(
	struct se_cmd *cmd,
	unsigned long long lba,
	u32 sectors,
	enum dma_data_direction data_direction,
	struct list_head *mem_list,
	int set_counts)
{
	unsigned char *cdb = NULL;
	struct se_task *task;
	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
	struct se_device *dev = SE_DEV(cmd);
	int max_sectors_set = 0, ret;
	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;

	if (!mem_list) {
		printk(KERN_ERR "mem_list is NULL in transport_generic_get"
				"_cdb_count()\n");
		return 0;
	}
	/*
	 * While using RAMDISK_DR backstores is the only case where
	 * mem_list will ever be empty at this point.
	 */
	if (!(list_empty(mem_list)))
		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
	/*
	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
	 */
	if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
	    !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
	    (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
		se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
					struct se_mem, se_list);

	/* One iteration per generated task; lba/sectors track progress */
	while (sectors) {
		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
			CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
			transport_dev_end_lba(dev));

		task = transport_generic_get_task(cmd, data_direction);
		if (!(task))
			goto out;

		/* Clamp this task's sector count at device limits */
		transport_set_tasks_sectors(task, dev, lba, sectors,
				&max_sectors_set);

		task->task_lba = lba;
		lba += task->task_sectors;
		sectors -= task->task_sectors;
		task->task_size = (task->task_sectors *
				   DEV_ATTRIB(dev)->block_size);

		cdb = TRANSPORT(dev)->get_cdb(task);
		if ((cdb)) {
			/* Per-task CDB: copy and re-split LBA/length fields */
			memcpy(cdb, T_TASK(cmd)->t_task_cdb,
				scsi_command_size(T_TASK(cmd)->t_task_cdb));
			cmd->transport_split_cdb(task->task_lba,
					&task->task_sectors, cdb);
		}

		/*
		 * Perform the SE OBJ plugin and/or Transport plugin specific
		 * mapping for T_TASK(cmd)->t_mem_list. And setup the
		 * task->task_sg and if necessary task->task_sg_bidi
		 */
		ret = transport_do_se_mem_map(dev, task, mem_list,
				NULL, se_mem, &se_mem_lout, &se_mem_cnt,
				&task_offset_in);
		if (ret < 0)
			goto out;

		/* Advance the se_mem cursor for the next task */
		se_mem = se_mem_lout;
		/*
		 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
		 *
		 * Note that the first call to transport_do_se_mem_map() above will
		 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
		 * -> transport_calc_sg_num(), and the second here will do the
		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
		 */
		if (task->task_sg_bidi != NULL) {
			ret = transport_do_se_mem_map(dev, task,
				T_TASK(cmd)->t_mem_bidi_list, NULL,
				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
				&task_offset_in);
			if (ret < 0)
				goto out;

			se_mem_bidi = se_mem_bidi_lout;
		}
		task_cdbs++;

		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
				task_cdbs, task->task_sg_num);

		if (max_sectors_set) {
			max_sectors_set = 0;
			continue;
		}

		if (!sectors)
			break;
	}

	if (set_counts) {
		/* Take frontend + storage engine references on the command */
		atomic_inc(&T_TASK(cmd)->t_fe_count);
		atomic_inc(&T_TASK(cmd)->t_se_count);
	}

	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
		CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
		? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);

	return task_cdbs;
out:
	return 0;
}
4982
4983static int
4984transport_map_control_cmd_to_task(struct se_cmd *cmd)
4985{
4986 struct se_device *dev = SE_DEV(cmd);
4987 unsigned char *cdb;
4988 struct se_task *task;
4989 int ret;
4990
4991 task = transport_generic_get_task(cmd, cmd->data_direction);
4992 if (!task)
4993 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
4994
4995 cdb = TRANSPORT(dev)->get_cdb(task);
4996 if (cdb)
4997 memcpy(cdb, cmd->t_task->t_task_cdb,
4998 scsi_command_size(cmd->t_task->t_task_cdb));
4999
5000 task->task_size = cmd->data_length;
5001 task->task_sg_num =
5002 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
5003
5004 atomic_inc(&cmd->t_task->t_fe_count);
5005 atomic_inc(&cmd->t_task->t_se_count);
5006
5007 if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
5008 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
5009 u32 se_mem_cnt = 0, task_offset = 0;
5010
5011 BUG_ON(list_empty(cmd->t_task->t_mem_list));
5012
5013 ret = transport_do_se_mem_map(dev, task,
5014 cmd->t_task->t_mem_list, NULL, se_mem,
5015 &se_mem_lout, &se_mem_cnt, &task_offset);
5016 if (ret < 0)
5017 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5018
5019 if (dev->transport->map_task_SG)
5020 return dev->transport->map_task_SG(task);
5021 return 0;
5022 } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
5023 if (dev->transport->map_task_non_SG)
5024 return dev->transport->map_task_non_SG(task);
5025 return 0;
5026 } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
5027 if (dev->transport->cdb_none)
5028 return dev->transport->cdb_none(task);
5029 return 0;
5030 } else {
5031 BUG();
5032 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
5033 }
5034}
5035
5036/* transport_generic_new_cmd(): Called from transport_processing_thread()
5037 *
5038 * Allocate storage transport resources from a set of values predefined
5039 * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
5040 * Any non zero return here is treated as an "out of resource' op here.
5041 */
5042 /*
5043 * Generate struct se_task(s) and/or their payloads for this CDB.
5044 */
static int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_portal_group *se_tpg;
	struct se_task *task;
	struct se_device *dev = SE_DEV(cmd);
	int ret = 0;

	/*
	 * Determine is the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * to setup beforehand the linked list of physical memory at
	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
		ret = transport_allocate_resources(cmd);
		if (ret < 0)
			return ret;
	}

	/* Translate the CDB's transfer length into sectors */
	ret = transport_get_sectors(cmd);
	if (ret < 0)
		return ret;

	/* Build the struct se_task list for this command */
	ret = transport_new_cmd_obj(cmd);
	if (ret < 0)
		return ret;

	/*
	 * Determine if the calling TCM fabric module is talking to
	 * Linux/NET via kernel sockets and needs to allocate a
	 * struct iovec array to complete the struct se_cmd
	 */
	se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
	if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
		ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
		if (ret < 0)
			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		/* Data-phase command: let the backend map each task's SGL */
		list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
			if (atomic_read(&task->task_sent))
				continue;
			if (!dev->transport->map_task_SG)
				continue;

			ret = dev->transport->map_task_SG(task);
			if (ret < 0)
				return ret;
		}
	} else {
		/* Control command: single-task mapping path */
		ret = transport_map_control_cmd_to_task(cmd);
		if (ret < 0)
			return ret;
	}

	/*
	 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}
	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;
}
5119
/* transport_generic_process_write():
 *
 * Kick off execution of a WRITE command once its data has arrived
 * from the fabric frontend.
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	/*
	 * NOTE(review): the entire #if 0 block below is dead code and does
	 * not even compile as written (mismatched parentheses in the
	 * kzalloc() call and a malformed scatterlist cast).  It is kept
	 * verbatim only as a sketch of intended WRITE-underflow handling;
	 * it must be rewritten before it could ever be enabled.
	 */
#if 0
	/*
	 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
	 * original EDTL
	 */
	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		if (!T_TASK(cmd)->t_tasks_se_num) {
			unsigned char *dst, *buf =
				(unsigned char *)T_TASK(cmd)->t_task_buf;

			dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
			if (!(dst)) {
				printk(KERN_ERR "Unable to allocate memory for"
						" WRITE underflow\n");
				transport_generic_request_failure(cmd, NULL,
					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				return;
			}
			memcpy(dst, buf, cmd->cmd_spdtl);

			kfree(T_TASK(cmd)->t_task_buf);
			T_TASK(cmd)->t_task_buf = dst;
		} else {
			struct scatterlist *sg =
				(struct scatterlist *sg)T_TASK(cmd)->t_task_buf;
			struct scatterlist *orig_sg;

			orig_sg = kzalloc(sizeof(struct scatterlist) *
					T_TASK(cmd)->t_tasks_se_num,
					GFP_KERNEL))) {
			if (!(orig_sg)) {
				printk(KERN_ERR "Unable to allocate memory"
						" for WRITE underflow\n");
				transport_generic_request_failure(cmd, NULL,
					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				return;
			}

			memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
					sizeof(struct scatterlist) *
					T_TASK(cmd)->t_tasks_se_num);

			cmd->data_length = cmd->cmd_spdtl;
			/*
			 * FIXME, clear out original struct se_task and state
			 * information.
			 */
			if (transport_generic_new_cmd(cmd) < 0) {
				transport_generic_request_failure(cmd, NULL,
					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				kfree(orig_sg);
				return;
			}

			transport_memcpy_write_sg(cmd, orig_sg);
		}
	}
#endif
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);
5187
/* transport_generic_write_pending():
 *
 * Mark a WRITE command as waiting for data and notify the fabric
 * frontend that its buffers are ready.  Returns the frontend's error
 * code on failure, otherwise PYX_TRANSPORT_WRITE_PENDING.
 */
static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
	 * from the passed Linux/SCSI struct scatterlist located at
	 * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at
	 * T_TASK(se_cmd)->t_task_buf.
	 */
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
		transport_memcpy_read_contig(cmd,
				T_TASK(cmd)->t_task_buf,
				T_TASK(cmd)->t_task_pt_sgl);
	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
	 * can be called from HW target mode interrupt code. This is safe
	 * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = CMD_TFO(cmd)->write_pending(cmd);
	if (ret < 0)
		return ret;

	return PYX_TRANSPORT_WRITE_PENDING;
}
5229
/* transport_release_cmd_to_pool():
 *
 * Free the storage engine's per-command state, then return the
 * struct se_cmd to the owning fabric module's command pool.
 */
void transport_release_cmd_to_pool(struct se_cmd *cmd)
{
	/* Both the task state and the fabric ops must exist by now */
	BUG_ON(!T_TASK(cmd));
	BUG_ON(!CMD_TFO(cmd));

	transport_free_se_cmd(cmd);
	CMD_TFO(cmd)->release_cmd_to_pool(cmd);
}
EXPORT_SYMBOL(transport_release_cmd_to_pool);
5243
5244/* transport_generic_free_cmd():
5245 *
5246 * Called from processing frontend to release storage engine resources
5247 */
5248void transport_generic_free_cmd(
5249 struct se_cmd *cmd,
5250 int wait_for_tasks,
5251 int release_to_pool,
5252 int session_reinstatement)
5253{
5254 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
5255 transport_release_cmd_to_pool(cmd);
5256 else {
5257 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
5258
5259 if (SE_LUN(cmd)) {
5260#if 0
5261 printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
5262 " SE_LUN(cmd)\n", cmd,
5263 CMD_TFO(cmd)->get_task_tag(cmd));
5264#endif
5265 transport_lun_remove_cmd(cmd);
5266 }
5267
5268 if (wait_for_tasks && cmd->transport_wait_for_tasks)
5269 cmd->transport_wait_for_tasks(cmd, 0, 0);
5270
5271 transport_generic_remove(cmd, release_to_pool,
5272 session_reinstatement);
5273 }
5274}
5275EXPORT_SYMBOL(transport_generic_free_cmd);
5276
/*
 * No-op ->transport_wait_for_tasks() implementation for fabrics/commands
 * that have nothing to wait on.
 */
static void transport_nop_wait_for_tasks(
	struct se_cmd *cmd,
	int remove_cmd,
	int session_reinstatement)
{
	/* intentionally empty */
}
5284
/* transport_lun_wait_for_tasks():
 *
 * Called from ConfigFS context to stop the passed struct se_cmd to allow
 * an struct se_lun to be successfully shutdown.
 *
 * Returns 0 once the command's tasks are stopped, or -1 if the frontend
 * had already requested a stop (in which case this command is skipped).
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
			" TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -1;
	}
	/* Mark that LUN shutdown is now stopping this command */
	atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	/* Kick the device queue thread so it notices the stop request */
	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
			" %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
	if (!ret) {
		/* Tasks could not be stopped synchronously; wait for them */
		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				CMD_TFO(cmd)->get_task_tag(cmd));
		wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				CMD_TFO(cmd)->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

	return 0;
}
5327
/* #define DEBUG_CLEAR_LUN */
/* Compile-time switch for verbose LUN-clear tracing below. */
#ifdef DEBUG_CLEAR_LUN
#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
#else
#define DEBUG_CLEAR_L(x...)
#endif
5334
/*
 * Drain every struct se_cmd still attached to a LUN being shut down:
 * stop each command's tasks, detach it from the device, and return
 * CHECK_CONDITION (TCM_NON_EXISTENT_LUN) to the initiator.
 *
 * Locking: lun->lun_cmd_lock is dropped and re-taken around per-command
 * work; T_TASK(cmd)->t_state_lock guards the per-command stop flags.
 */
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty_careful(&lun->lun_cmd_list)) {
		cmd = list_entry(lun->lun_cmd_list.next,
			struct se_cmd, se_lun_list);
		list_del(&cmd->se_lun_list);

		if (!(T_TASK(cmd))) {
			printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
				"[i,t]_state: %u/%u\n",
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&T_TASK(cmd)->t_state_lock);
		DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
			"_lun_stop for  ITT: 0x%08x\n",
			SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));
		atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
		spin_unlock(&T_TASK(cmd)->t_state_lock);

		/* Drop the LUN lock while waiting on this command's tasks */
		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!(SE_LUN(cmd))) {
			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
			/* Frontend already stopping this cmd; move on */
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
		if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
			goto check_cond;
		}
		/* Detach the command's tasks from the device state lists */
		atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * send to the fabric frontend for delivery back to the
		 * Initiator Node. Return this SCSI CDB back with an
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
		if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, CMD_TFO(cmd)->get_task_tag(cmd));

			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}
5442
5443static int transport_clear_lun_thread(void *p)
5444{
5445 struct se_lun *lun = (struct se_lun *)p;
5446
5447 __transport_clear_lun_from_sessions(lun);
5448 complete(&lun->lun_shutdown_comp);
5449
5450 return 0;
5451}
5452
5453int transport_clear_lun_from_sessions(struct se_lun *lun)
5454{
5455 struct task_struct *kt;
5456
5457 kt = kthread_run(transport_clear_lun_thread, (void *)lun,
5458 "tcm_cl_%u", lun->unpacked_lun);
5459 if (IS_ERR(kt)) {
5460 printk(KERN_ERR "Unable to start clear_lun thread\n");
5461 return -1;
5462 }
5463 wait_for_completion(&lun->lun_shutdown_comp);
5464
5465 return 0;
5466}
5467
/* transport_generic_wait_for_tasks():
 *
 * Called from frontend or passthrough context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 *
 * @cmd: descriptor to quiesce
 * @remove_cmd: non-zero to also release the descriptor once stopped
 * @session_reinstatement: passed through to transport_generic_free_cmd()
 */
static void transport_generic_wait_for_tasks(
	struct se_cmd *cmd,
	int remove_cmd,
	int session_reinstatement)
{
	unsigned long flags;

	/* Only descriptors the storage engine ever owned (generic LUN
	 * commands) or TMRs have engine state to wait on. */
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {

		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
			" wait_for_completion(&T_TASK(cmd)transport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			CMD_TFO(cmd)->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		complete(&T_TASK(cmd)->transport_lun_stop_comp);
		wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
			" wait_for_completion(&T_TASK(cmd)transport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			CMD_TFO(cmd)->get_task_tag(cmd));

		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
	}
	/* Engine no longer holds a reference; skip straight to release. */
	if (!atomic_read(&T_TASK(cmd)->t_transport_active))
		goto remove;

	/* Request a stop; the processing thread completes
	 * t_transport_stop_comp once the command's tasks have quiesced. */
	atomic_set(&T_TASK(cmd)->t_transport_stop, 1);

	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
		" = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
		CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
		cmd->deferred_t_state);

	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	/* Kick the device processing thread so it notices the stop flag. */
	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);

	wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&T_TASK(cmd)->t_transport_active, 0);
	atomic_set(&T_TASK(cmd)->t_transport_stop, 0);

	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
		"&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
		CMD_TFO(cmd)->get_task_tag(cmd));
remove:
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	if (!remove_cmd)
		return;

	transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
}
5553
5554static int transport_get_sense_codes(
5555 struct se_cmd *cmd,
5556 u8 *asc,
5557 u8 *ascq)
5558{
5559 *asc = cmd->scsi_asc;
5560 *ascq = cmd->scsi_ascq;
5561
5562 return 0;
5563}
5564
5565static int transport_set_sense_codes(
5566 struct se_cmd *cmd,
5567 u8 asc,
5568 u8 ascq)
5569{
5570 cmd->scsi_asc = asc;
5571 cmd->scsi_ascq = ascq;
5572
5573 return 0;
5574}
5575
/*
 * Build CHECK_CONDITION status plus SPC-3 fixed-format sense data for
 * @cmd and queue it back to the fabric.
 *
 * @reason: TCM_* sense reason to encode; ignored when zero and
 *          @from_transport is set (status already prepared).
 * @from_transport: non-zero when called from transport internals, in
 *          which case SCF_EMULATED_TASK_SENSE is not set.
 *
 * Returns 0; a second call for the same command is a no-op guarded by
 * SCF_SENT_CHECK_CONDITION.
 */
int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	/* Send CHECK_CONDITION at most once per command. */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 *
	 * Each case writes: response code 0x70 (current error, fixed
	 * format), the sense key, and the ASC/ASCQ pair for @reason.
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		/* ASC/ASCQ come from the pending UA for this I_T nexus. */
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* ASC/ASCQ come from values stored earlier on the command. */
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	CMD_TFO(cmd)->queue_status(cmd);
	return 0;
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
5746
/*
 * Check whether @cmd was aborted by the transport and, if so, queue a
 * delayed SAM_STAT_TASK_ABORTED status (TAS) back to the fabric.
 *
 * @send_status: zero suppresses sending status now (caller only wants
 * the aborted/not-aborted answer).
 *
 * Returns 1 if the command was aborted (whether or not status was
 * queued on this call), 0 otherwise.
 */
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
		/* Only ever send the delayed TAS status once. */
		if (!(send_status) ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			T_TASK(cmd)->t_task_cdb[0],
			CMD_TFO(cmd)->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		CMD_TFO(cmd)->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);
5768
/*
 * Send SAM_STAT_TASK_ABORTED status for @cmd back to the fabric, or
 * defer it while fabric WRITE data is still expected.
 */
void transport_send_task_abort(struct se_cmd *cmd)
{
	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response. This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
			/* Mark aborted; status is sent later by
			 * transport_check_aborted_status(). */
			atomic_inc(&T_TASK(cmd)->t_transport_aborted);
			smp_mb__after_atomic_inc();
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			transport_new_cmd_failure(cmd);
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
		CMD_TFO(cmd)->get_task_tag(cmd));
#endif
	CMD_TFO(cmd)->queue_status(cmd);
}
5794
5795/* transport_generic_do_tmr():
5796 *
5797 *
5798 */
5799int transport_generic_do_tmr(struct se_cmd *cmd)
5800{
5801 struct se_cmd *ref_cmd;
5802 struct se_device *dev = SE_DEV(cmd);
5803 struct se_tmr_req *tmr = cmd->se_tmr_req;
5804 int ret;
5805
5806 switch (tmr->function) {
5807 case ABORT_TASK:
5808 ref_cmd = tmr->ref_cmd;
5809 tmr->response = TMR_FUNCTION_REJECTED;
5810 break;
5811 case ABORT_TASK_SET:
5812 case CLEAR_ACA:
5813 case CLEAR_TASK_SET:
5814 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
5815 break;
5816 case LUN_RESET:
5817 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
5818 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
5819 TMR_FUNCTION_REJECTED;
5820 break;
5821#if 0
5822 case TARGET_WARM_RESET:
5823 transport_generic_host_reset(dev->se_hba);
5824 tmr->response = TMR_FUNCTION_REJECTED;
5825 break;
5826 case TARGET_COLD_RESET:
5827 transport_generic_host_reset(dev->se_hba);
5828 transport_generic_cold_reset(dev->se_hba);
5829 tmr->response = TMR_FUNCTION_REJECTED;
5830 break;
5831#endif
5832 default:
5833 printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
5834 tmr->function);
5835 tmr->response = TMR_FUNCTION_REJECTED;
5836 break;
5837 }
5838
5839 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
5840 CMD_TFO(cmd)->queue_tm_rsp(cmd);
5841
5842 transport_cmd_check_stop(cmd, 2, 0);
5843 return 0;
5844}
5845
5846/*
5847 * Called with spin_lock_irq(&dev->execute_task_lock); held
5848 *
5849 */
5850static struct se_task *
5851transport_get_task_from_state_list(struct se_device *dev)
5852{
5853 struct se_task *task;
5854
5855 if (list_empty(&dev->state_task_list))
5856 return NULL;
5857
5858 list_for_each_entry(task, &dev->state_task_list, t_state_list)
5859 break;
5860
5861 list_del(&task->t_state_list);
5862 atomic_set(&task->task_state_active, 0);
5863
5864 return task;
5865}
5866
/*
 * Quiesce a struct se_device on shutdown: stop every active task on the
 * device state list, fail or complete each owning command, then drain
 * and fail everything still sitting on the device command queue.
 *
 * Called from transport_processing_thread() when TRANSPORT_DEVICE_SHUTDOWN
 * is observed in dev->dev_status.
 */
static void transport_processing_shutdown(struct se_device *dev)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr;
	struct se_task *task;
	u8 state;
	unsigned long flags;
	/*
	 * Empty the struct se_device's struct se_task state list.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	while ((task = transport_get_task_from_state_list(dev))) {
		if (!(TASK_CMD(task))) {
			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
			continue;
		}
		cmd = TASK_CMD(task);

		if (!T_TASK(cmd)) {
			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
				" %p ITT: 0x%08x\n", task, cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
			continue;
		}
		/* Drop the list lock before the (possibly sleeping)
		 * per-command work below; re-taken before each iteration. */
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
			" %d/%d cdb: 0x%02x\n", cmd, task,
			CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
			cmd->t_state, cmd->deferred_t_state,
			T_TASK(cmd)->t_task_cdb[0]);
		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			T_TASK(cmd)->t_task_cdbs,
			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
			atomic_read(&T_TASK(cmd)->t_transport_active),
			atomic_read(&T_TASK(cmd)->t_transport_stop),
			atomic_read(&T_TASK(cmd)->t_transport_sent));

		/* Ask an in-flight task to stop and wait for it. */
		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
				" %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
				task, dev);

			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		}
		__transport_stop_task_timer(task, &flags);

		/* More tasks remain for this command; handle it when the
		 * last one is reaped. */
		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
			spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_DO("Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}

		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
					" %p\n", task, dev);

			/* Frontend still holds a reference: fail the command
			 * back with CHECK CONDITION. */
			if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);
				transport_send_check_condition_and_sense(
					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
					0);
				transport_remove_cmd_from_queue(cmd,
					SE_DEV(cmd)->dev_queue_obj);

				transport_lun_remove_cmd(cmd);
				transport_cmd_check_stop(cmd, 1, 0);
			} else {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

				transport_remove_cmd_from_queue(cmd,
					SE_DEV(cmd)->dev_queue_obj);

				transport_lun_remove_cmd(cmd);

				if (transport_cmd_check_stop(cmd, 1, 0))
					transport_generic_remove(cmd, 0, 0);
			}

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
				task, dev);

		/* Same split as above for commands no longer active. */
		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);
			transport_send_check_condition_and_sense(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
			transport_remove_cmd_from_queue(cmd,
				SE_DEV(cmd)->dev_queue_obj);

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop(cmd, 1, 0);
		} else {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			transport_remove_cmd_from_queue(cmd,
				SE_DEV(cmd)->dev_queue_obj);
			transport_lun_remove_cmd(cmd);

			if (transport_cmd_check_stop(cmd, 1, 0))
				transport_generic_remove(cmd, 0, 0);
		}

		spin_lock_irqsave(&dev->execute_task_lock, flags);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	/*
	 * Empty the struct se_device's struct se_cmd list.
	 */
	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
		spin_unlock_irqrestore(
				&dev->dev_queue_obj->cmd_queue_lock, flags);
		cmd = (struct se_cmd *)qr->cmd;
		state = qr->state;
		kfree(qr);

		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
				cmd, state);

		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
			transport_send_check_condition_and_sense(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop(cmd, 1, 0);
		} else {
			transport_lun_remove_cmd(cmd);
			if (transport_cmd_check_stop(cmd, 1, 0))
				transport_generic_remove(cmd, 0, 0);
		}
		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
}
6032
/* transport_processing_thread():
 *
 * Per-device kthread main loop: sleeps until work is queued, handles
 * device shutdown, then dispatches queued commands by t_state.
 * Returns 0 when kthread_should_stop() or an interrupted wait ends
 * the loop.
 */
static int transport_processing_thread(void *param)
{
	int ret, t_state;
	struct se_cmd *cmd;
	struct se_device *dev = (struct se_device *) param;
	struct se_queue_req *qr;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
				atomic_read(&dev->dev_queue_obj->queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

		/* Device going down: quiesce everything, then wait again. */
		spin_lock_irq(&dev->dev_status_lock);
		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
			spin_unlock_irq(&dev->dev_status_lock);
			transport_processing_shutdown(dev);
			continue;
		}
		spin_unlock_irq(&dev->dev_status_lock);

get_cmd:
		__transport_execute_tasks(dev);

		qr = transport_get_qr_from_queue(dev->dev_queue_obj);
		if (!(qr))
			continue;

		cmd = (struct se_cmd *)qr->cmd;
		t_state = qr->state;
		kfree(qr);

		/* Dispatch on the queued state recorded with the request. */
		switch (t_state) {
		case TRANSPORT_NEW_CMD_MAP:
			if (!(CMD_TFO(cmd)->new_cmd_map)) {
				printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = CMD_TFO(cmd)->new_cmd_map(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd, NULL,
						0, (cmd->data_direction !=
						    DMA_TO_DEVICE));
				break;
			}
			/* Fall through */
		case TRANSPORT_NEW_CMD:
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd, NULL,
					0, (cmd->data_direction !=
					     DMA_TO_DEVICE));
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_COMPLETE_OK:
			transport_stop_all_task_timers(cmd);
			transport_generic_complete_ok(cmd);
			break;
		case TRANSPORT_REMOVE:
			transport_generic_remove(cmd, 1, 0);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_FAILURE:
			transport_generic_request_failure(cmd, NULL, 1, 1);
			break;
		case TRANSPORT_COMPLETE_TIMEOUT:
			transport_stop_all_task_timers(cmd);
			transport_generic_request_timeout(cmd);
			break;
		default:
			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
				" %u\n", t_state, cmd->deferred_t_state,
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd),
				SE_LUN(cmd)->unpacked_lun);
			BUG();
		}

		/* Drain the queue before sleeping again. */
		goto get_cmd;
	}

out:
	transport_release_all_cmds(dev);
	dev->process_thread = NULL;
	return 0;
}
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
new file mode 100644
index 000000000000..a2ef346087e8
--- /dev/null
+++ b/drivers/target/target_core_ua.c
@@ -0,0 +1,332 @@
1/*******************************************************************************
2 * Filename: target_core_ua.c
3 *
4 * This file contains logic for SPC-3 Unit Attention emulation
5 *
6 * Copyright (c) 2009,2010 Rising Tide Systems
7 * Copyright (c) 2009,2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27#include <linux/version.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <scsi/scsi.h>
31#include <scsi/scsi_cmnd.h>
32
33#include <target/target_core_base.h>
34#include <target/target_core_device.h>
35#include <target/target_core_transport.h>
36#include <target/target_core_fabric_ops.h>
37#include <target/target_core_configfs.h>
38
39#include "target_core_alua.h"
40#include "target_core_hba.h"
41#include "target_core_pr.h"
42#include "target_core_ua.h"
43
44int core_scsi3_ua_check(
45 struct se_cmd *cmd,
46 unsigned char *cdb)
47{
48 struct se_dev_entry *deve;
49 struct se_session *sess = cmd->se_sess;
50 struct se_node_acl *nacl;
51
52 if (!(sess))
53 return 0;
54
55 nacl = sess->se_node_acl;
56 if (!(nacl))
57 return 0;
58
59 deve = &nacl->device_list[cmd->orig_fe_lun];
60 if (!(atomic_read(&deve->ua_count)))
61 return 0;
62 /*
63 * From sam4r14, section 5.14 Unit attention condition:
64 *
65 * a) if an INQUIRY command enters the enabled command state, the
66 * device server shall process the INQUIRY command and shall neither
67 * report nor clear any unit attention condition;
68 * b) if a REPORT LUNS command enters the enabled command state, the
69 * device server shall process the REPORT LUNS command and shall not
70 * report any unit attention condition;
71 * e) if a REQUEST SENSE command enters the enabled command state while
72 * a unit attention condition exists for the SCSI initiator port
73 * associated with the I_T nexus on which the REQUEST SENSE command
74 * was received, then the device server shall process the command
75 * and either:
76 */
77 switch (cdb[0]) {
78 case INQUIRY:
79 case REPORT_LUNS:
80 case REQUEST_SENSE:
81 return 0;
82 default:
83 return -1;
84 }
85
86 return -1;
87}
88
/*
 * Allocate and queue a Unit Attention condition (asc/ascq) for
 * @unpacked_lun on @nacl's device list entry, ordered by SAM-4 UA
 * priority (family 29h first).  Duplicate asc/ascq pairs are dropped.
 *
 * Returns 0 on success or duplicate, -1 when @nacl is NULL or
 * allocation fails.  Takes nacl->device_list_lock (irq) nested over
 * deve->ua_lock.
 */
int core_scsi3_ua_allocate(
	struct se_node_acl *nacl,
	u32 unpacked_lun,
	u8 asc,
	u8 ascq)
{
	struct se_dev_entry *deve;
	struct se_ua *ua, *ua_p, *ua_tmp;
	/*
	 * PASSTHROUGH OPS
	 */
	if (!(nacl))
		return -1;

	/* GFP_ATOMIC: callers may hold spinlocks / run in atomic context. */
	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
	if (!(ua)) {
		printk(KERN_ERR "Unable to allocate struct se_ua\n");
		return -1;
	}
	INIT_LIST_HEAD(&ua->ua_dev_list);
	INIT_LIST_HEAD(&ua->ua_nacl_list);

	ua->ua_nacl = nacl;
	ua->ua_asc = asc;
	ua->ua_ascq = ascq;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[unpacked_lun];

	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
		/*
		 * Do not report the same UNIT ATTENTION twice..
		 */
		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
			spin_unlock(&deve->ua_lock);
			spin_unlock_irq(&nacl->device_list_lock);
			kmem_cache_free(se_ua_cache, ua);
			return 0;
		}
		/*
		 * Attach the highest priority Unit Attention to
		 * the head of the list following sam4r14,
		 * Section 5.14 Unit Attention Condition:
		 *
		 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
		 * POWER ON OCCURRED or
		 * DEVICE INTERNAL RESET
		 * SCSI BUS RESET OCCURRED or
		 * MICROCODE HAS BEEN CHANGED or
		 * protocol specific
		 * BUS DEVICE RESET FUNCTION OCCURRED
		 * I_T NEXUS LOSS OCCURRED
		 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
		 * all others Lowest
		 *
		 * Each of the ASCQ codes listed above are defined in
		 * the 29h ASC family, see spc4r17 Table D.1
		 */
		if (ua_p->ua_asc == 0x29) {
			if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
				list_add(&ua->ua_nacl_list,
						&deve->ua_list);
			else
				list_add_tail(&ua->ua_nacl_list,
						&deve->ua_list);
		} else if (ua_p->ua_asc == 0x2a) {
			/*
			 * Incoming Family 29h ASCQ codes will override
			 * Family 2Ah ASCQ codes for Unit Attention condition.
			 *
			 * NOTE(review): the second operand compares the new
			 * ASCQ against ua_p->ua_asc (an ASC, always 0x2a
			 * here) rather than ua_p->ua_ascq — looks like a
			 * typo'd field; confirm intended priority ordering.
			 */
			if ((asc == 0x29) || (ascq > ua_p->ua_asc))
				list_add(&ua->ua_nacl_list,
						&deve->ua_list);
			else
				list_add_tail(&ua->ua_nacl_list,
						&deve->ua_list);
		} else
			list_add_tail(&ua->ua_nacl_list,
					&deve->ua_list);
		spin_unlock(&deve->ua_lock);
		spin_unlock_irq(&nacl->device_list_lock);

		atomic_inc(&deve->ua_count);
		smp_mb__after_atomic_inc();
		return 0;
	}
	/* List was empty: this UA becomes the sole entry. */
	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
	spin_unlock(&deve->ua_lock);
	spin_unlock_irq(&nacl->device_list_lock);

	printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
		" 0x%02x, ASCQ: 0x%02x\n",
		TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
		asc, ascq);

	atomic_inc(&deve->ua_count);
	smp_mb__after_atomic_inc();
	return 0;
}
189
190void core_scsi3_ua_release_all(
191 struct se_dev_entry *deve)
192{
193 struct se_ua *ua, *ua_p;
194
195 spin_lock(&deve->ua_lock);
196 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
197 list_del(&ua->ua_nacl_list);
198 kmem_cache_free(se_ua_cache, ua);
199
200 atomic_dec(&deve->ua_count);
201 smp_mb__after_atomic_dec();
202 }
203 spin_unlock(&deve->ua_lock);
204}
205
/*
 * Fill *asc/*ascq from the highest-priority pending Unit Attention for
 * @cmd's I_T nexus, for inclusion in CHECK CONDITION sense data.
 *
 * When the device's ua_intlck_ctrl is 0 (default), all pending UAs are
 * released; otherwise only the head UA is reported and nothing is
 * cleared.  No-op when there is no session, ACL, or pending UA.
 */
void core_scsi3_ua_for_check_condition(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct se_ua *ua = NULL, *ua_p;
	int head = 1;

	if (!(sess))
		return;

	nacl = sess->se_node_acl;
	if (!(nacl))
		return;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[cmd->orig_fe_lun];
	if (!(atomic_read(&deve->ua_count))) {
		spin_unlock_irq(&nacl->device_list_lock);
		return;
	}
	/*
	 * The highest priority Unit Attentions are placed at the head of the
	 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
	 * sense data for the received CDB.
	 */
	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
		/*
		 * For ua_intlck_ctrl code not equal to 00b, only report the
		 * highest priority UNIT_ATTENTION and ASC/ASCQ without
		 * clearing it.
		 */
		if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			break;
		}
		/*
		 * Otherwise for the default 00b, release the UNIT ATTENTION
		 * condition. Return the ASC/ASCQ of the highest priority UA
		 * (head of the list) in the outgoing CHECK_CONDITION + sense.
		 */
		if (head) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			head = 0;
		}
		list_del(&ua->ua_nacl_list);
		kmem_cache_free(se_ua_cache, ua);

		atomic_dec(&deve->ua_count);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&deve->ua_lock);
	spin_unlock_irq(&nacl->device_list_lock);

	printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
		TPG_TFO(nacl->se_tpg)->get_fabric_name(),
		(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
		"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
		cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
}
275
/*
 * Handle a REQUEST_SENSE CDB: return the highest-priority pending Unit
 * Attention ASC/ASCQ in *asc/*ascq and release ALL pending UAs for the
 * nexus.
 *
 * Returns 0 when a UA was reported and cleared, -1 when there is no
 * session, ACL, or pending UA.
 */
int core_scsi3_ua_clear_for_request_sense(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct se_ua *ua = NULL, *ua_p;
	int head = 1;

	if (!(sess))
		return -1;

	nacl = sess->se_node_acl;
	if (!(nacl))
		return -1;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[cmd->orig_fe_lun];
	if (!(atomic_read(&deve->ua_count))) {
		spin_unlock_irq(&nacl->device_list_lock);
		return -1;
	}
	/*
	 * The highest priority Unit Attentions are placed at the head of the
	 * struct se_dev_entry->ua_list. The First (and hence highest priority)
	 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
	 * matching struct se_lun.
	 *
	 * Once the returning ASC/ASCQ values are set, we go ahead and
	 * release all of the Unit Attention conditions for the associated
	 * struct se_lun.
	 */
	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
		if (head) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			head = 0;
		}
		list_del(&ua->ua_nacl_list);
		kmem_cache_free(se_ua_cache, ua);

		atomic_dec(&deve->ua_count);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&deve->ua_lock);
	spin_unlock_irq(&nacl->device_list_lock);

	printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
		" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
		cmd->orig_fe_lun, *asc, *ascq);

	/* head still set means the list was emptied concurrently: no UA. */
	return (head) ? -1 : 0;
}
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
new file mode 100644
index 000000000000..6e6b03460a1a
--- /dev/null
+++ b/drivers/target/target_core_ua.h
@@ -0,0 +1,36 @@
1#ifndef TARGET_CORE_UA_H
2
3/*
4 * From spc4r17, Table D.1: ASC and ASCQ Assignement
5 */
6#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00
7#define ASCQ_29H_POWER_ON_OCCURRED 0x01
8#define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02
9#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03
10#define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04
11#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05
12#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06
13#define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07
14
15#define ASCQ_2AH_PARAMETERS_CHANGED 0x00
16#define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01
17#define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02
18#define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03
19#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
20#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
21#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
22#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
23#define ASCQ_2AH_PRIORITY_CHANGED 0x08
24
25#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
26
27extern struct kmem_cache *se_ua_cache;
28
29extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
30extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
31extern void core_scsi3_ua_release_all(struct se_dev_entry *);
32extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
33extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
34 u8 *, u8 *);
35
36#endif /* TARGET_CORE_UA_H */
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index bf7c687519ef..f7a5dba3ca23 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig THERMAL 5menuconfig THERMAL
6 tristate "Generic Thermal sysfs driver" 6 tristate "Generic Thermal sysfs driver"
7 depends on NET
7 help 8 help
8 Generic Thermal Sysfs driver offers a generic mechanism for 9 Generic Thermal Sysfs driver offers a generic mechanism for
9 thermal management. Usually it's made up of one or more thermal 10 thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 13c72c629329..7d0e63c79280 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -32,6 +32,8 @@
32#include <linux/thermal.h> 32#include <linux/thermal.h>
33#include <linux/spinlock.h> 33#include <linux/spinlock.h>
34#include <linux/reboot.h> 34#include <linux/reboot.h>
35#include <net/netlink.h>
36#include <net/genetlink.h>
35 37
36MODULE_AUTHOR("Zhang Rui"); 38MODULE_AUTHOR("Zhang Rui");
37MODULE_DESCRIPTION("Generic thermal management sysfs support"); 39MODULE_DESCRIPTION("Generic thermal management sysfs support");
@@ -58,6 +60,22 @@ static LIST_HEAD(thermal_tz_list);
58static LIST_HEAD(thermal_cdev_list); 60static LIST_HEAD(thermal_cdev_list);
59static DEFINE_MUTEX(thermal_list_lock); 61static DEFINE_MUTEX(thermal_list_lock);
60 62
63static unsigned int thermal_event_seqnum;
64
65static struct genl_family thermal_event_genl_family = {
66 .id = GENL_ID_GENERATE,
67 .name = THERMAL_GENL_FAMILY_NAME,
68 .version = THERMAL_GENL_VERSION,
69 .maxattr = THERMAL_GENL_ATTR_MAX,
70};
71
72static struct genl_multicast_group thermal_event_mcgrp = {
73 .name = THERMAL_GENL_MCAST_GROUP_NAME,
74};
75
76static int genetlink_init(void);
77static void genetlink_exit(void);
78
61static int get_idr(struct idr *idr, struct mutex *lock, int *id) 79static int get_idr(struct idr *idr, struct mutex *lock, int *id)
62{ 80{
63 int err; 81 int err;
@@ -823,11 +841,8 @@ static struct class thermal_class = {
823 * @devdata: device private data. 841 * @devdata: device private data.
824 * @ops: standard thermal cooling devices callbacks. 842 * @ops: standard thermal cooling devices callbacks.
825 */ 843 */
826struct thermal_cooling_device *thermal_cooling_device_register(char *type, 844struct thermal_cooling_device *thermal_cooling_device_register(
827 void *devdata, 845 char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
828 struct
829 thermal_cooling_device_ops
830 *ops)
831{ 846{
832 struct thermal_cooling_device *cdev; 847 struct thermal_cooling_device *cdev;
833 struct thermal_zone_device *pos; 848 struct thermal_zone_device *pos;
@@ -1048,13 +1063,9 @@ EXPORT_SYMBOL(thermal_zone_device_update);
1048 * section 11.1.5.1 of the ACPI specification 3.0. 1063 * section 11.1.5.1 of the ACPI specification 3.0.
1049 */ 1064 */
1050struct thermal_zone_device *thermal_zone_device_register(char *type, 1065struct thermal_zone_device *thermal_zone_device_register(char *type,
1051 int trips, 1066 int trips, void *devdata,
1052 void *devdata, struct 1067 const struct thermal_zone_device_ops *ops,
1053 thermal_zone_device_ops 1068 int tc1, int tc2, int passive_delay, int polling_delay)
1054 *ops, int tc1, int
1055 tc2,
1056 int passive_delay,
1057 int polling_delay)
1058{ 1069{
1059 struct thermal_zone_device *tz; 1070 struct thermal_zone_device *tz;
1060 struct thermal_cooling_device *pos; 1071 struct thermal_cooling_device *pos;
@@ -1214,6 +1225,82 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1214 1225
1215EXPORT_SYMBOL(thermal_zone_device_unregister); 1226EXPORT_SYMBOL(thermal_zone_device_unregister);
1216 1227
1228int generate_netlink_event(u32 orig, enum events event)
1229{
1230 struct sk_buff *skb;
1231 struct nlattr *attr;
1232 struct thermal_genl_event *thermal_event;
1233 void *msg_header;
1234 int size;
1235 int result;
1236
1237 /* allocate memory */
1238 size = nla_total_size(sizeof(struct thermal_genl_event)) + \
1239 nla_total_size(0);
1240
1241 skb = genlmsg_new(size, GFP_ATOMIC);
1242 if (!skb)
1243 return -ENOMEM;
1244
1245 /* add the genetlink message header */
1246 msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++,
1247 &thermal_event_genl_family, 0,
1248 THERMAL_GENL_CMD_EVENT);
1249 if (!msg_header) {
1250 nlmsg_free(skb);
1251 return -ENOMEM;
1252 }
1253
1254 /* fill the data */
1255 attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \
1256 sizeof(struct thermal_genl_event));
1257
1258 if (!attr) {
1259 nlmsg_free(skb);
1260 return -EINVAL;
1261 }
1262
1263 thermal_event = nla_data(attr);
1264 if (!thermal_event) {
1265 nlmsg_free(skb);
1266 return -EINVAL;
1267 }
1268
1269 memset(thermal_event, 0, sizeof(struct thermal_genl_event));
1270
1271 thermal_event->orig = orig;
1272 thermal_event->event = event;
1273
1274 /* send multicast genetlink message */
1275 result = genlmsg_end(skb, msg_header);
1276 if (result < 0) {
1277 nlmsg_free(skb);
1278 return result;
1279 }
1280
1281 result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
1282 if (result)
1283 printk(KERN_INFO "failed to send netlink event:%d", result);
1284
1285 return result;
1286}
1287EXPORT_SYMBOL(generate_netlink_event);
1288
1289static int genetlink_init(void)
1290{
1291 int result;
1292
1293 result = genl_register_family(&thermal_event_genl_family);
1294 if (result)
1295 return result;
1296
1297 result = genl_register_mc_group(&thermal_event_genl_family,
1298 &thermal_event_mcgrp);
1299 if (result)
1300 genl_unregister_family(&thermal_event_genl_family);
1301 return result;
1302}
1303
1217static int __init thermal_init(void) 1304static int __init thermal_init(void)
1218{ 1305{
1219 int result = 0; 1306 int result = 0;
@@ -1225,9 +1312,15 @@ static int __init thermal_init(void)
1225 mutex_destroy(&thermal_idr_lock); 1312 mutex_destroy(&thermal_idr_lock);
1226 mutex_destroy(&thermal_list_lock); 1313 mutex_destroy(&thermal_list_lock);
1227 } 1314 }
1315 result = genetlink_init();
1228 return result; 1316 return result;
1229} 1317}
1230 1318
1319static void genetlink_exit(void)
1320{
1321 genl_unregister_family(&thermal_event_genl_family);
1322}
1323
1231static void __exit thermal_exit(void) 1324static void __exit thermal_exit(void)
1232{ 1325{
1233 class_unregister(&thermal_class); 1326 class_unregister(&thermal_class);
@@ -1235,7 +1328,8 @@ static void __exit thermal_exit(void)
1235 idr_destroy(&thermal_cdev_idr); 1328 idr_destroy(&thermal_cdev_idr);
1236 mutex_destroy(&thermal_idr_lock); 1329 mutex_destroy(&thermal_idr_lock);
1237 mutex_destroy(&thermal_list_lock); 1330 mutex_destroy(&thermal_list_lock);
1331 genetlink_exit();
1238} 1332}
1239 1333
1240subsys_initcall(thermal_init); 1334fs_initcall(thermal_init);
1241module_exit(thermal_exit); 1335module_exit(thermal_exit);
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index c43ef48b1a0f..396277216e4f 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -9,3 +9,5 @@ obj-$(CONFIG_N_GSM) += n_gsm.o
9obj-$(CONFIG_R3964) += n_r3964.o 9obj-$(CONFIG_R3964) += n_r3964.o
10 10
11obj-y += vt/ 11obj-y += vt/
12obj-$(CONFIG_HVC_DRIVER) += hvc/
13obj-y += serial/
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
new file mode 100644
index 000000000000..d79e7e9bf9d2
--- /dev/null
+++ b/drivers/tty/hvc/Makefile
@@ -0,0 +1,12 @@
1obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
2obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
3obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
4obj-$(CONFIG_HVC_TILE) += hvc_tile.o
5obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
6obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
7obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
8obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
9obj-$(CONFIG_HVC_XEN) += hvc_xen.o
10obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
11obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
12obj-$(CONFIG_HVCS) += hvcs.o
diff --git a/drivers/char/hvc_beat.c b/drivers/tty/hvc/hvc_beat.c
index 5fe4631e2a61..5fe4631e2a61 100644
--- a/drivers/char/hvc_beat.c
+++ b/drivers/tty/hvc/hvc_beat.c
diff --git a/drivers/char/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index e9cba13ee800..e9cba13ee800 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
diff --git a/drivers/char/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 54381eba4e4a..54381eba4e4a 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
diff --git a/drivers/char/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 6470f63deb4b..6470f63deb4b 100644
--- a/drivers/char/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
diff --git a/drivers/char/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
index 2623e177e8d6..2623e177e8d6 100644
--- a/drivers/char/hvc_irq.c
+++ b/drivers/tty/hvc/hvc_irq.c
diff --git a/drivers/char/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
index 21c54955084e..21c54955084e 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/tty/hvc/hvc_iseries.c
diff --git a/drivers/char/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index c3425bb3a1f6..c3425bb3a1f6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
diff --git a/drivers/char/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 61c4a61558d9..61c4a61558d9 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
diff --git a/drivers/char/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
index 7a84a0595477..7a84a0595477 100644
--- a/drivers/char/hvc_tile.c
+++ b/drivers/tty/hvc/hvc_tile.c
diff --git a/drivers/char/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index b0957e61a7be..b0957e61a7be 100644
--- a/drivers/char/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
diff --git a/drivers/char/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 5e2f52b33327..5e2f52b33327 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
diff --git a/drivers/char/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 3740e327f180..3740e327f180 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
diff --git a/drivers/char/hvcs.c b/drivers/tty/hvc/hvcs.c
index bedc6c1b6fa5..bedc6c1b6fa5 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
diff --git a/drivers/char/hvsi.c b/drivers/tty/hvc/hvsi.c
index 67a75a502c01..67a75a502c01 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 44b8412a04e8..aa2e5d3eb01a 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2414,6 +2414,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
2414 2414
2415 gsm->initiator = c->initiator; 2415 gsm->initiator = c->initiator;
2416 gsm->mru = c->mru; 2416 gsm->mru = c->mru;
2417 gsm->mtu = c->mtu;
2417 gsm->encoding = c->encapsulation; 2418 gsm->encoding = c->encapsulation;
2418 gsm->adaption = c->adaption; 2419 gsm->adaption = c->adaption;
2419 gsm->n2 = c->n2; 2420 gsm->n2 = c->n2;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 47d32281032c..52fc0c9a6364 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -581,8 +581,9 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
581 __u8 __user *buf, size_t nr) 581 __u8 __user *buf, size_t nr)
582{ 582{
583 struct n_hdlc *n_hdlc = tty2n_hdlc(tty); 583 struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
584 int ret; 584 int ret = 0;
585 struct n_hdlc_buf *rbuf; 585 struct n_hdlc_buf *rbuf;
586 DECLARE_WAITQUEUE(wait, current);
586 587
587 if (debuglevel >= DEBUG_LEVEL_INFO) 588 if (debuglevel >= DEBUG_LEVEL_INFO)
588 printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__); 589 printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
@@ -598,57 +599,55 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
598 return -EFAULT; 599 return -EFAULT;
599 } 600 }
600 601
601 tty_lock(); 602 add_wait_queue(&tty->read_wait, &wait);
602 603
603 for (;;) { 604 for (;;) {
604 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { 605 if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
605 tty_unlock(); 606 ret = -EIO;
606 return -EIO; 607 break;
607 } 608 }
609 if (tty_hung_up_p(file))
610 break;
608 611
609 n_hdlc = tty2n_hdlc (tty); 612 set_current_state(TASK_INTERRUPTIBLE);
610 if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
611 tty != n_hdlc->tty) {
612 tty_unlock();
613 return 0;
614 }
615 613
616 rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list); 614 rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
617 if (rbuf) 615 if (rbuf) {
616 if (rbuf->count > nr) {
617 /* too large for caller's buffer */
618 ret = -EOVERFLOW;
619 } else {
620 if (copy_to_user(buf, rbuf->buf, rbuf->count))
621 ret = -EFAULT;
622 else
623 ret = rbuf->count;
624 }
625
626 if (n_hdlc->rx_free_buf_list.count >
627 DEFAULT_RX_BUF_COUNT)
628 kfree(rbuf);
629 else
630 n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
618 break; 631 break;
632 }
619 633
620 /* no data */ 634 /* no data */
621 if (file->f_flags & O_NONBLOCK) { 635 if (file->f_flags & O_NONBLOCK) {
622 tty_unlock(); 636 ret = -EAGAIN;
623 return -EAGAIN; 637 break;
624 } 638 }
625 639
626 interruptible_sleep_on (&tty->read_wait); 640 schedule();
641
627 if (signal_pending(current)) { 642 if (signal_pending(current)) {
628 tty_unlock(); 643 ret = -EINTR;
629 return -EINTR; 644 break;
630 } 645 }
631 } 646 }
632 647
633 if (rbuf->count > nr) 648 remove_wait_queue(&tty->read_wait, &wait);
634 /* frame too large for caller's buffer (discard frame) */ 649 __set_current_state(TASK_RUNNING);
635 ret = -EOVERFLOW; 650
636 else {
637 /* Copy the data to the caller's buffer */
638 if (copy_to_user(buf, rbuf->buf, rbuf->count))
639 ret = -EFAULT;
640 else
641 ret = rbuf->count;
642 }
643
644 /* return HDLC buffer to free list unless the free list */
645 /* count has exceeded the default value, in which case the */
646 /* buffer is freed back to the OS to conserve memory */
647 if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
648 kfree(rbuf);
649 else
650 n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
651 tty_unlock();
652 return ret; 651 return ret;
653 652
654} /* end of n_hdlc_tty_read() */ 653} /* end of n_hdlc_tty_read() */
@@ -691,14 +690,15 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
691 count = maxframe; 690 count = maxframe;
692 } 691 }
693 692
694 tty_lock();
695
696 add_wait_queue(&tty->write_wait, &wait); 693 add_wait_queue(&tty->write_wait, &wait);
697 set_current_state(TASK_INTERRUPTIBLE); 694
695 for (;;) {
696 set_current_state(TASK_INTERRUPTIBLE);
698 697
699 /* Allocate transmit buffer */ 698 tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
700 /* sleep until transmit buffer available */ 699 if (tbuf)
701 while (!(tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list))) { 700 break;
701
702 if (file->f_flags & O_NONBLOCK) { 702 if (file->f_flags & O_NONBLOCK) {
703 error = -EAGAIN; 703 error = -EAGAIN;
704 break; 704 break;
@@ -719,7 +719,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
719 } 719 }
720 } 720 }
721 721
722 set_current_state(TASK_RUNNING); 722 __set_current_state(TASK_RUNNING);
723 remove_wait_queue(&tty->write_wait, &wait); 723 remove_wait_queue(&tty->write_wait, &wait);
724 724
725 if (!error) { 725 if (!error) {
@@ -731,7 +731,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
731 n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf); 731 n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
732 n_hdlc_send_frames(n_hdlc,tty); 732 n_hdlc_send_frames(n_hdlc,tty);
733 } 733 }
734 tty_unlock(); 734
735 return error; 735 return error;
736 736
737} /* end of n_hdlc_tty_write() */ 737} /* end of n_hdlc_tty_write() */
diff --git a/drivers/serial/21285.c b/drivers/tty/serial/21285.c
index d89aa38c5cf0..d89aa38c5cf0 100644
--- a/drivers/serial/21285.c
+++ b/drivers/tty/serial/21285.c
diff --git a/drivers/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index be0ebce36e54..be0ebce36e54 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
diff --git a/drivers/serial/68328serial.h b/drivers/tty/serial/68328serial.h
index 664ceb0a158c..664ceb0a158c 100644
--- a/drivers/serial/68328serial.h
+++ b/drivers/tty/serial/68328serial.h
diff --git a/drivers/serial/68360serial.c b/drivers/tty/serial/68360serial.c
index 88b13356ec10..bc21eeae8fde 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/tty/serial/68360serial.c
@@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = {
2428 /* .read_proc = rs_360_read_proc, */ 2428 /* .read_proc = rs_360_read_proc, */
2429 .tiocmget = rs_360_tiocmget, 2429 .tiocmget = rs_360_tiocmget,
2430 .tiocmset = rs_360_tiocmset, 2430 .tiocmset = rs_360_tiocmset,
2431 .get_icount = rs_360_get_icount,
2431}; 2432};
2432 2433
2433static int __init rs_360_init(void) 2434static int __init rs_360_init(void)
diff --git a/drivers/serial/8250.c b/drivers/tty/serial/8250.c
index b25e6e490530..3975df6f7fdb 100644
--- a/drivers/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -236,7 +236,8 @@ static const struct serial8250_config uart_config[] = {
236 .fifo_size = 128, 236 .fifo_size = 128,
237 .tx_loadsz = 128, 237 .tx_loadsz = 128,
238 .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, 238 .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
239 .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, 239 /* UART_CAP_EFR breaks billionon CF bluetooth card. */
240 .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
240 }, 241 },
241 [PORT_16654] = { 242 [PORT_16654] = {
242 .name = "ST16654", 243 .name = "ST16654",
diff --git a/drivers/serial/8250.h b/drivers/tty/serial/8250.h
index 6e19ea3e48d5..6e19ea3e48d5 100644
--- a/drivers/serial/8250.h
+++ b/drivers/tty/serial/8250.h
diff --git a/drivers/serial/8250_accent.c b/drivers/tty/serial/8250_accent.c
index 9c10262f2469..9c10262f2469 100644
--- a/drivers/serial/8250_accent.c
+++ b/drivers/tty/serial/8250_accent.c
diff --git a/drivers/serial/8250_acorn.c b/drivers/tty/serial/8250_acorn.c
index b0ce8c56f1a4..b0ce8c56f1a4 100644
--- a/drivers/serial/8250_acorn.c
+++ b/drivers/tty/serial/8250_acorn.c
diff --git a/drivers/serial/8250_boca.c b/drivers/tty/serial/8250_boca.c
index 3bfe0f7b26fb..3bfe0f7b26fb 100644
--- a/drivers/serial/8250_boca.c
+++ b/drivers/tty/serial/8250_boca.c
diff --git a/drivers/serial/8250_early.c b/drivers/tty/serial/8250_early.c
index eaafb98debed..eaafb98debed 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/tty/serial/8250_early.c
diff --git a/drivers/serial/8250_exar_st16c554.c b/drivers/tty/serial/8250_exar_st16c554.c
index 567143ace159..567143ace159 100644
--- a/drivers/serial/8250_exar_st16c554.c
+++ b/drivers/tty/serial/8250_exar_st16c554.c
diff --git a/drivers/serial/8250_fourport.c b/drivers/tty/serial/8250_fourport.c
index 6375d68b7913..6375d68b7913 100644
--- a/drivers/serial/8250_fourport.c
+++ b/drivers/tty/serial/8250_fourport.c
diff --git a/drivers/serial/8250_gsc.c b/drivers/tty/serial/8250_gsc.c
index d8c0ffbfa6e3..d8c0ffbfa6e3 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/tty/serial/8250_gsc.c
diff --git a/drivers/serial/8250_hp300.c b/drivers/tty/serial/8250_hp300.c
index c13438c93012..c13438c93012 100644
--- a/drivers/serial/8250_hp300.c
+++ b/drivers/tty/serial/8250_hp300.c
diff --git a/drivers/serial/8250_hub6.c b/drivers/tty/serial/8250_hub6.c
index 7609150e7d5e..7609150e7d5e 100644
--- a/drivers/serial/8250_hub6.c
+++ b/drivers/tty/serial/8250_hub6.c
diff --git a/drivers/serial/8250_mca.c b/drivers/tty/serial/8250_mca.c
index d10be944ad44..d10be944ad44 100644
--- a/drivers/serial/8250_mca.c
+++ b/drivers/tty/serial/8250_mca.c
diff --git a/drivers/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 8b8930f700b5..8b8930f700b5 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
diff --git a/drivers/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
index 4822cb50cd0f..4822cb50cd0f 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250_pnp.c
diff --git a/drivers/serial/Kconfig b/drivers/tty/serial/Kconfig
index c1df7676a73d..2b8334601c8b 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -81,7 +81,7 @@ config SERIAL_8250_GSC
81 default SERIAL_8250 81 default SERIAL_8250
82 82
83config SERIAL_8250_PCI 83config SERIAL_8250_PCI
84 tristate "8250/16550 PCI device support" if EMBEDDED 84 tristate "8250/16550 PCI device support" if EXPERT
85 depends on SERIAL_8250 && PCI 85 depends on SERIAL_8250 && PCI
86 default SERIAL_8250 86 default SERIAL_8250
87 help 87 help
@@ -90,7 +90,7 @@ config SERIAL_8250_PCI
90 Saves about 9K. 90 Saves about 9K.
91 91
92config SERIAL_8250_PNP 92config SERIAL_8250_PNP
93 tristate "8250/16550 PNP device support" if EMBEDDED 93 tristate "8250/16550 PNP device support" if EXPERT
94 depends on SERIAL_8250 && PNP 94 depends on SERIAL_8250 && PNP
95 default SERIAL_8250 95 default SERIAL_8250
96 help 96 help
@@ -1518,6 +1518,7 @@ config SERIAL_BCM63XX_CONSOLE
1518config SERIAL_GRLIB_GAISLER_APBUART 1518config SERIAL_GRLIB_GAISLER_APBUART
1519 tristate "GRLIB APBUART serial support" 1519 tristate "GRLIB APBUART serial support"
1520 depends on OF 1520 depends on OF
1521 select SERIAL_CORE
1521 ---help--- 1522 ---help---
1522 Add support for the GRLIB APBUART serial port. 1523 Add support for the GRLIB APBUART serial port.
1523 1524
diff --git a/drivers/serial/Makefile b/drivers/tty/serial/Makefile
index 8ea92e9c73b0..8ea92e9c73b0 100644
--- a/drivers/serial/Makefile
+++ b/drivers/tty/serial/Makefile
diff --git a/drivers/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index f9b49b5ff5e1..f9b49b5ff5e1 100644
--- a/drivers/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
diff --git a/drivers/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 721216292a50..721216292a50 100644
--- a/drivers/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
diff --git a/drivers/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 2904aa044126..2904aa044126 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
diff --git a/drivers/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e76d7d000128..e76d7d000128 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
diff --git a/drivers/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 095a5d562618..095a5d562618 100644
--- a/drivers/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
diff --git a/drivers/serial/apbuart.h b/drivers/tty/serial/apbuart.h
index 5faf87c8d2bc..5faf87c8d2bc 100644
--- a/drivers/serial/apbuart.h
+++ b/drivers/tty/serial/apbuart.h
diff --git a/drivers/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 3892666b5fbd..2a1d52fb4936 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1732,6 +1732,11 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1732 device_init_wakeup(&pdev->dev, 1); 1732 device_init_wakeup(&pdev->dev, 1);
1733 platform_set_drvdata(pdev, port); 1733 platform_set_drvdata(pdev, port);
1734 1734
1735 if (port->rs485.flags & SER_RS485_ENABLED) {
1736 UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
1737 UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
1738 }
1739
1735 return 0; 1740 return 0;
1736 1741
1737err_add_port: 1742err_add_port:
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a1a0e55d0807..a1a0e55d0807 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
diff --git a/drivers/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
index e381b895b04d..9b1ff2b6bb37 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/tty/serial/bfin_5xx.c
@@ -370,10 +370,8 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
370{ 370{
371 struct bfin_serial_port *uart = dev_id; 371 struct bfin_serial_port *uart = dev_id;
372 372
373 spin_lock(&uart->port.lock);
374 while (UART_GET_LSR(uart) & DR) 373 while (UART_GET_LSR(uart) & DR)
375 bfin_serial_rx_chars(uart); 374 bfin_serial_rx_chars(uart);
376 spin_unlock(&uart->port.lock);
377 375
378 return IRQ_HANDLED; 376 return IRQ_HANDLED;
379} 377}
@@ -490,9 +488,8 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
490{ 488{
491 int x_pos, pos; 489 int x_pos, pos;
492 490
493 dma_disable_irq(uart->tx_dma_channel); 491 dma_disable_irq_nosync(uart->rx_dma_channel);
494 dma_disable_irq(uart->rx_dma_channel); 492 spin_lock_bh(&uart->rx_lock);
495 spin_lock_bh(&uart->port.lock);
496 493
497 /* 2D DMA RX buffer ring is used. Because curr_y_count and 494 /* 2D DMA RX buffer ring is used. Because curr_y_count and
498 * curr_x_count can't be read as an atomic operation, 495 * curr_x_count can't be read as an atomic operation,
@@ -523,8 +520,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
523 uart->rx_dma_buf.tail = uart->rx_dma_buf.head; 520 uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
524 } 521 }
525 522
526 spin_unlock_bh(&uart->port.lock); 523 spin_unlock_bh(&uart->rx_lock);
527 dma_enable_irq(uart->tx_dma_channel);
528 dma_enable_irq(uart->rx_dma_channel); 524 dma_enable_irq(uart->rx_dma_channel);
529 525
530 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); 526 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -571,7 +567,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
571 unsigned short irqstat; 567 unsigned short irqstat;
572 int x_pos, pos; 568 int x_pos, pos;
573 569
574 spin_lock(&uart->port.lock); 570 spin_lock(&uart->rx_lock);
575 irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); 571 irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
576 clear_dma_irqstat(uart->rx_dma_channel); 572 clear_dma_irqstat(uart->rx_dma_channel);
577 573
@@ -589,7 +585,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
589 uart->rx_dma_buf.tail = uart->rx_dma_buf.head; 585 uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
590 } 586 }
591 587
592 spin_unlock(&uart->port.lock); 588 spin_unlock(&uart->rx_lock);
593 589
594 return IRQ_HANDLED; 590 return IRQ_HANDLED;
595} 591}
@@ -1332,6 +1328,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
1332 } 1328 }
1333 1329
1334#ifdef CONFIG_SERIAL_BFIN_DMA 1330#ifdef CONFIG_SERIAL_BFIN_DMA
1331 spin_lock_init(&uart->rx_lock);
1335 uart->tx_done = 1; 1332 uart->tx_done = 1;
1336 uart->tx_count = 0; 1333 uart->tx_count = 0;
1337 1334
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index e95c524d9d18..e95c524d9d18 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index 6d06ce1d5675..6d06ce1d5675 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
diff --git a/drivers/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index b6acd19b458e..b6acd19b458e 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
diff --git a/drivers/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
index e072724ea754..e072724ea754 100644
--- a/drivers/serial/cpm_uart/Makefile
+++ b/drivers/tty/serial/cpm_uart/Makefile
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index b754dcf0fda5..b754dcf0fda5 100644
--- a/drivers/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 8692ff98fc07..8692ff98fc07 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 3fc1d66e32c6..3fc1d66e32c6 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
index 10eecd6af6d4..10eecd6af6d4 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index 814ac006393f..814ac006393f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
index 7194c63dcf5f..7194c63dcf5f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
diff --git a/drivers/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index bcc31f2140ac..bcc31f2140ac 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
diff --git a/drivers/serial/crisv10.h b/drivers/tty/serial/crisv10.h
index ea0beb46a10d..ea0beb46a10d 100644
--- a/drivers/serial/crisv10.h
+++ b/drivers/tty/serial/crisv10.h
diff --git a/drivers/serial/dz.c b/drivers/tty/serial/dz.c
index 57421d776329..57421d776329 100644
--- a/drivers/serial/dz.c
+++ b/drivers/tty/serial/dz.c
diff --git a/drivers/serial/dz.h b/drivers/tty/serial/dz.h
index faf169ed27b3..faf169ed27b3 100644
--- a/drivers/serial/dz.h
+++ b/drivers/tty/serial/dz.h
diff --git a/drivers/serial/icom.c b/drivers/tty/serial/icom.c
index 53a468227056..53a468227056 100644
--- a/drivers/serial/icom.c
+++ b/drivers/tty/serial/icom.c
diff --git a/drivers/serial/icom.h b/drivers/tty/serial/icom.h
index c8029e0025c9..c8029e0025c9 100644
--- a/drivers/serial/icom.h
+++ b/drivers/tty/serial/icom.h
diff --git a/drivers/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index ab93763862d5..ab93763862d5 100644
--- a/drivers/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
diff --git a/drivers/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
index deb7b8d977dc..deb7b8d977dc 100644
--- a/drivers/serial/ifx6x60.h
+++ b/drivers/tty/serial/ifx6x60.h
diff --git a/drivers/serial/imx.c b/drivers/tty/serial/imx.c
index dfcf4b1878aa..dfcf4b1878aa 100644
--- a/drivers/serial/imx.c
+++ b/drivers/tty/serial/imx.c
diff --git a/drivers/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index ee43efc7bdcc..ee43efc7bdcc 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
diff --git a/drivers/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index fcfe82653ac8..fcfe82653ac8 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
diff --git a/drivers/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index ebff4a1d4bcc..ebff4a1d4bcc 100644
--- a/drivers/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
diff --git a/drivers/serial/ip22zilog.h b/drivers/tty/serial/ip22zilog.h
index a59a9a8341d2..a59a9a8341d2 100644
--- a/drivers/serial/ip22zilog.h
+++ b/drivers/tty/serial/ip22zilog.h
diff --git a/drivers/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile
index e46b6e0f8b18..e46b6e0f8b18 100644
--- a/drivers/serial/jsm/Makefile
+++ b/drivers/tty/serial/jsm/Makefile
diff --git a/drivers/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index 38a509c684cd..38a509c684cd 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 18f548449c63..18f548449c63 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 7960d9633c15..7960d9633c15 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 7a4a914ecff0..7a4a914ecff0 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
diff --git a/drivers/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 25a8bc565f40..25a8bc565f40 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
diff --git a/drivers/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index bea5c215460c..bea5c215460c 100644
--- a/drivers/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
diff --git a/drivers/serial/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
index e9b7e11793b1..e9b7e11793b1 100644
--- a/drivers/serial/m32r_sio.h
+++ b/drivers/tty/serial/m32r_sio.h
diff --git a/drivers/serial/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
index 4671473793e3..4671473793e3 100644
--- a/drivers/serial/m32r_sio_reg.h
+++ b/drivers/tty/serial/m32r_sio_reg.h
diff --git a/drivers/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa27d8d..beb1afa27d8d 100644
--- a/drivers/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
diff --git a/drivers/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index a1fe304f2f52..a1fe304f2f52 100644
--- a/drivers/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
diff --git a/drivers/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870edf708..910870edf708 100644
--- a/drivers/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
diff --git a/drivers/serial/max3107.h b/drivers/tty/serial/max3107.h
index 7ab632392502..7ab632392502 100644
--- a/drivers/serial/max3107.h
+++ b/drivers/tty/serial/max3107.h
diff --git a/drivers/serial/mcf.c b/drivers/tty/serial/mcf.c
index 3394b7cc1722..3394b7cc1722 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
diff --git a/drivers/serial/mfd.c b/drivers/tty/serial/mfd.c
index d40010a22ecd..d40010a22ecd 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 126ec7f568ec..126ec7f568ec 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
diff --git a/drivers/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 6a9c6605666a..6a9c6605666a 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
diff --git a/drivers/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index b62857bf2fdb..b62857bf2fdb 100644
--- a/drivers/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
diff --git a/drivers/serial/mrst_max3110.h b/drivers/tty/serial/mrst_max3110.h
index d1ef43af397c..d1ef43af397c 100644
--- a/drivers/serial/mrst_max3110.h
+++ b/drivers/tty/serial/mrst_max3110.h
diff --git a/drivers/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 8e43a7b69e64..8e43a7b69e64 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
diff --git a/drivers/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index f6ca9ca79e98..f6ca9ca79e98 100644
--- a/drivers/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
diff --git a/drivers/serial/mux.c b/drivers/tty/serial/mux.c
index 9711e06a8374..9711e06a8374 100644
--- a/drivers/serial/mux.c
+++ b/drivers/tty/serial/mux.c
diff --git a/drivers/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index 7735c9f35fa0..7735c9f35fa0 100644
--- a/drivers/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
diff --git a/drivers/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index de173671e3d0..de173671e3d0 100644
--- a/drivers/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
diff --git a/drivers/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 5c7abe4c94dd..5c7abe4c94dd 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
diff --git a/drivers/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 7f2f01058789..7f2f01058789 100644
--- a/drivers/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
diff --git a/drivers/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 70a61458ec42..70a61458ec42 100644
--- a/drivers/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
diff --git a/drivers/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5b9cde79e4ea..5b9cde79e4ea 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
diff --git a/drivers/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index cbc34fbb1b20..cbc34fbb1b20 100644
--- a/drivers/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 0aa75a97531c..0aa75a97531c 100644
--- a/drivers/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
diff --git a/drivers/serial/pxa.c b/drivers/tty/serial/pxa.c
index 1102a39b44f5..1102a39b44f5 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
diff --git a/drivers/serial/s3c2400.c b/drivers/tty/serial/s3c2400.c
index fed1a9a1ffb4..fed1a9a1ffb4 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/tty/serial/s3c2400.c
diff --git a/drivers/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
index 73f089d3efd6..73f089d3efd6 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/tty/serial/s3c2410.c
diff --git a/drivers/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
index 1700b1a2fb7e..1700b1a2fb7e 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/tty/serial/s3c2412.c
diff --git a/drivers/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
index 094cc3904b13..094cc3904b13 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/tty/serial/s3c2440.c
diff --git a/drivers/serial/s3c24a0.c b/drivers/tty/serial/s3c24a0.c
index fad6083ca427..fad6083ca427 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/tty/serial/s3c24a0.c
diff --git a/drivers/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
index 4be92ab50058..4be92ab50058 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/tty/serial/s3c6400.c
diff --git a/drivers/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
index 6ebccd70a707..6ebccd70a707 100644
--- a/drivers/serial/s5pv210.c
+++ b/drivers/tty/serial/s5pv210.c
diff --git a/drivers/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 2199d819a987..2199d819a987 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
diff --git a/drivers/serial/samsung.c b/drivers/tty/serial/samsung.c
index 7ac2bf5167cd..2335edafe903 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -883,10 +883,10 @@ static struct uart_ops s3c24xx_serial_ops = {
883 883
884static struct uart_driver s3c24xx_uart_drv = { 884static struct uart_driver s3c24xx_uart_drv = {
885 .owner = THIS_MODULE, 885 .owner = THIS_MODULE,
886 .dev_name = "s3c2410_serial", 886 .driver_name = "s3c2410_serial",
887 .nr = CONFIG_SERIAL_SAMSUNG_UARTS, 887 .nr = CONFIG_SERIAL_SAMSUNG_UARTS,
888 .cons = S3C24XX_SERIAL_CONSOLE, 888 .cons = S3C24XX_SERIAL_CONSOLE,
889 .driver_name = S3C24XX_SERIAL_NAME, 889 .dev_name = S3C24XX_SERIAL_NAME,
890 .major = S3C24XX_SERIAL_MAJOR, 890 .major = S3C24XX_SERIAL_MAJOR,
891 .minor = S3C24XX_SERIAL_MINOR, 891 .minor = S3C24XX_SERIAL_MINOR,
892}; 892};
diff --git a/drivers/serial/samsung.h b/drivers/tty/serial/samsung.h
index 0ac06a07d25f..0ac06a07d25f 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
diff --git a/drivers/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index a2f2b3254499..602d9845c52f 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -829,7 +829,7 @@ static void __init sbd_probe_duarts(void)
829#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE 829#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
830/* 830/*
831 * Serial console stuff. Very basic, polling driver for doing serial 831 * Serial console stuff. Very basic, polling driver for doing serial
832 * console output. The console_sem is held by the caller, so we 832 * console output. The console_lock is held by the caller, so we
833 * shouldn't be interrupted for more console activity. 833 * shouldn't be interrupted for more console activity.
834 */ 834 */
835static void sbd_console_putchar(struct uart_port *uport, int ch) 835static void sbd_console_putchar(struct uart_port *uport, int ch)
diff --git a/drivers/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index 75038ad2b242..75038ad2b242 100644
--- a/drivers/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
diff --git a/drivers/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 460a72d91bb7..460a72d91bb7 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
diff --git a/drivers/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2ea172..93760b2ea172 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
diff --git a/drivers/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index b1962025b1aa..b1962025b1aa 100644
--- a/drivers/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/tty/serial/serial_lh7a40x.c
index ea744707c4d6..ea744707c4d6 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/tty/serial/serial_lh7a40x.c
diff --git a/drivers/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index c50e9fbbf743..c50e9fbbf743 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
diff --git a/drivers/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 92c91c83edde..92c91c83edde 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
diff --git a/drivers/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b223d6cbf33a..b223d6cbf33a 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
diff --git a/drivers/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index cff9a306660f..cff9a306660f 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
diff --git a/drivers/serial/suncore.c b/drivers/tty/serial/suncore.c
index 6381a0282ee7..6381a0282ee7 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
diff --git a/drivers/serial/suncore.h b/drivers/tty/serial/suncore.h
index db2057936c31..db2057936c31 100644
--- a/drivers/serial/suncore.h
+++ b/drivers/tty/serial/suncore.h
diff --git a/drivers/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index c9014868297d..c9014868297d 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
diff --git a/drivers/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 5b246b18f42f..5b246b18f42f 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
diff --git a/drivers/serial/sunsab.h b/drivers/tty/serial/sunsab.h
index b78e1f7b8050..b78e1f7b8050 100644
--- a/drivers/serial/sunsab.h
+++ b/drivers/tty/serial/sunsab.h
diff --git a/drivers/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 551ebfe3ccbb..551ebfe3ccbb 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
diff --git a/drivers/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index c1967ac1c07f..c1967ac1c07f 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
diff --git a/drivers/serial/sunzilog.h b/drivers/tty/serial/sunzilog.h
index 5dec7b47cc38..5dec7b47cc38 100644
--- a/drivers/serial/sunzilog.h
+++ b/drivers/tty/serial/sunzilog.h
diff --git a/drivers/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 1f36b7eb7351..1f36b7eb7351 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
diff --git a/drivers/serial/timbuart.h b/drivers/tty/serial/timbuart.h
index 7e566766bc43..7e566766bc43 100644
--- a/drivers/serial/timbuart.h
+++ b/drivers/tty/serial/timbuart.h
diff --git a/drivers/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index d2fce865b731..d2fce865b731 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
diff --git a/drivers/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 3f4848e2174a..3f4848e2174a 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 3beb6ab4fa68..3beb6ab4fa68 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
diff --git a/drivers/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 322bf56c0d89..322bf56c0d89 100644
--- a/drivers/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
diff --git a/drivers/serial/zs.c b/drivers/tty/serial/zs.c
index 1a7fd3e70315..1a7fd3e70315 100644
--- a/drivers/serial/zs.c
+++ b/drivers/tty/serial/zs.c
diff --git a/drivers/serial/zs.h b/drivers/tty/serial/zs.h
index aa921b57d827..aa921b57d827 100644
--- a/drivers/serial/zs.h
+++ b/drivers/tty/serial/zs.h
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index c556ed9db13d..8e0dd254eb11 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -46,7 +46,7 @@
46#include <asm/irq_regs.h> 46#include <asm/irq_regs.h>
47 47
48/* Whether we react on sysrq keys or just ignore them */ 48/* Whether we react on sysrq keys or just ignore them */
49static int __read_mostly sysrq_enabled = 1; 49static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
50static bool __read_mostly sysrq_always_enabled; 50static bool __read_mostly sysrq_always_enabled;
51 51
52static bool sysrq_on(void) 52static bool sysrq_on(void)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 464d09d97873..0065da4b11c1 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3256,8 +3256,8 @@ static ssize_t show_cons_active(struct device *dev,
3256 struct console *c; 3256 struct console *c;
3257 ssize_t count = 0; 3257 ssize_t count = 0;
3258 3258
3259 acquire_console_sem(); 3259 console_lock();
3260 for (c = console_drivers; c; c = c->next) { 3260 for_each_console(c) {
3261 if (!c->device) 3261 if (!c->device)
3262 continue; 3262 continue;
3263 if (!c->write) 3263 if (!c->write)
@@ -3271,7 +3271,7 @@ static ssize_t show_cons_active(struct device *dev,
3271 while (i--) 3271 while (i--)
3272 count += sprintf(buf + count, "%s%d%c", 3272 count += sprintf(buf + count, "%s%d%c",
3273 cs[i]->name, cs[i]->index, i ? ' ':'\n'); 3273 cs[i]->name, cs[i]->index, i ? ' ':'\n');
3274 release_console_sem(); 3274 console_unlock();
3275 3275
3276 return count; 3276 return count;
3277} 3277}
@@ -3306,7 +3306,7 @@ int __init tty_init(void)
3306 if (IS_ERR(consdev)) 3306 if (IS_ERR(consdev))
3307 consdev = NULL; 3307 consdev = NULL;
3308 else 3308 else
3309 device_create_file(consdev, &dev_attr_active); 3309 WARN_ON(device_create_file(consdev, &dev_attr_active) < 0);
3310 3310
3311#ifdef CONFIG_VT 3311#ifdef CONFIG_VT
3312 vty_init(&console_fops); 3312 vty_init(&console_fops);
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ebae344ce910..c956ed6c83a3 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -316,9 +316,9 @@ int paste_selection(struct tty_struct *tty)
316 /* always called with BTM from vt_ioctl */ 316 /* always called with BTM from vt_ioctl */
317 WARN_ON(!tty_locked()); 317 WARN_ON(!tty_locked());
318 318
319 acquire_console_sem(); 319 console_lock();
320 poke_blanked_console(); 320 poke_blanked_console();
321 release_console_sem(); 321 console_unlock();
322 322
323 ld = tty_ldisc_ref(tty); 323 ld = tty_ldisc_ref(tty);
324 if (!ld) { 324 if (!ld) {
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index eab3a1ff99e4..a672ed192d33 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -202,7 +202,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
202 /* Select the proper current console and verify 202 /* Select the proper current console and verify
203 * sanity of the situation under the console lock. 203 * sanity of the situation under the console lock.
204 */ 204 */
205 acquire_console_sem(); 205 console_lock();
206 206
207 attr = (currcons & 128); 207 attr = (currcons & 128);
208 currcons = (currcons & 127); 208 currcons = (currcons & 127);
@@ -336,9 +336,9 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
336 * the pagefault handling code may want to call printk(). 336 * the pagefault handling code may want to call printk().
337 */ 337 */
338 338
339 release_console_sem(); 339 console_unlock();
340 ret = copy_to_user(buf, con_buf_start, orig_count); 340 ret = copy_to_user(buf, con_buf_start, orig_count);
341 acquire_console_sem(); 341 console_lock();
342 342
343 if (ret) { 343 if (ret) {
344 read += (orig_count - ret); 344 read += (orig_count - ret);
@@ -354,7 +354,7 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
354 if (read) 354 if (read)
355 ret = read; 355 ret = read;
356unlock_out: 356unlock_out:
357 release_console_sem(); 357 console_unlock();
358 mutex_unlock(&con_buf_mtx); 358 mutex_unlock(&con_buf_mtx);
359 return ret; 359 return ret;
360} 360}
@@ -379,7 +379,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
379 /* Select the proper current console and verify 379 /* Select the proper current console and verify
380 * sanity of the situation under the console lock. 380 * sanity of the situation under the console lock.
381 */ 381 */
382 acquire_console_sem(); 382 console_lock();
383 383
384 attr = (currcons & 128); 384 attr = (currcons & 128);
385 currcons = (currcons & 127); 385 currcons = (currcons & 127);
@@ -414,9 +414,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
414 /* Temporarily drop the console lock so that we can read 414 /* Temporarily drop the console lock so that we can read
415 * in the write data from userspace safely. 415 * in the write data from userspace safely.
416 */ 416 */
417 release_console_sem(); 417 console_unlock();
418 ret = copy_from_user(con_buf, buf, this_round); 418 ret = copy_from_user(con_buf, buf, this_round);
419 acquire_console_sem(); 419 console_lock();
420 420
421 if (ret) { 421 if (ret) {
422 this_round -= ret; 422 this_round -= ret;
@@ -542,7 +542,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
542 vcs_scr_updated(vc); 542 vcs_scr_updated(vc);
543 543
544unlock_out: 544unlock_out:
545 release_console_sem(); 545 console_unlock();
546 546
547 mutex_unlock(&con_buf_mtx); 547 mutex_unlock(&con_buf_mtx);
548 548
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 76407eca9ab0..147ede3423df 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1003,9 +1003,9 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws)
1003 struct vc_data *vc = tty->driver_data; 1003 struct vc_data *vc = tty->driver_data;
1004 int ret; 1004 int ret;
1005 1005
1006 acquire_console_sem(); 1006 console_lock();
1007 ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row); 1007 ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
1008 release_console_sem(); 1008 console_unlock();
1009 return ret; 1009 return ret;
1010} 1010}
1011 1011
@@ -1271,7 +1271,7 @@ static void default_attr(struct vc_data *vc)
1271 vc->vc_color = vc->vc_def_color; 1271 vc->vc_color = vc->vc_def_color;
1272} 1272}
1273 1273
1274/* console_sem is held */ 1274/* console_lock is held */
1275static void csi_m(struct vc_data *vc) 1275static void csi_m(struct vc_data *vc)
1276{ 1276{
1277 int i; 1277 int i;
@@ -1415,7 +1415,7 @@ int mouse_reporting(void)
1415 return vc_cons[fg_console].d->vc_report_mouse; 1415 return vc_cons[fg_console].d->vc_report_mouse;
1416} 1416}
1417 1417
1418/* console_sem is held */ 1418/* console_lock is held */
1419static void set_mode(struct vc_data *vc, int on_off) 1419static void set_mode(struct vc_data *vc, int on_off)
1420{ 1420{
1421 int i; 1421 int i;
@@ -1485,7 +1485,7 @@ static void set_mode(struct vc_data *vc, int on_off)
1485 } 1485 }
1486} 1486}
1487 1487
1488/* console_sem is held */ 1488/* console_lock is held */
1489static void setterm_command(struct vc_data *vc) 1489static void setterm_command(struct vc_data *vc)
1490{ 1490{
1491 switch(vc->vc_par[0]) { 1491 switch(vc->vc_par[0]) {
@@ -1545,7 +1545,7 @@ static void setterm_command(struct vc_data *vc)
1545 } 1545 }
1546} 1546}
1547 1547
1548/* console_sem is held */ 1548/* console_lock is held */
1549static void csi_at(struct vc_data *vc, unsigned int nr) 1549static void csi_at(struct vc_data *vc, unsigned int nr)
1550{ 1550{
1551 if (nr > vc->vc_cols - vc->vc_x) 1551 if (nr > vc->vc_cols - vc->vc_x)
@@ -1555,7 +1555,7 @@ static void csi_at(struct vc_data *vc, unsigned int nr)
1555 insert_char(vc, nr); 1555 insert_char(vc, nr);
1556} 1556}
1557 1557
1558/* console_sem is held */ 1558/* console_lock is held */
1559static void csi_L(struct vc_data *vc, unsigned int nr) 1559static void csi_L(struct vc_data *vc, unsigned int nr)
1560{ 1560{
1561 if (nr > vc->vc_rows - vc->vc_y) 1561 if (nr > vc->vc_rows - vc->vc_y)
@@ -1566,7 +1566,7 @@ static void csi_L(struct vc_data *vc, unsigned int nr)
1566 vc->vc_need_wrap = 0; 1566 vc->vc_need_wrap = 0;
1567} 1567}
1568 1568
1569/* console_sem is held */ 1569/* console_lock is held */
1570static void csi_P(struct vc_data *vc, unsigned int nr) 1570static void csi_P(struct vc_data *vc, unsigned int nr)
1571{ 1571{
1572 if (nr > vc->vc_cols - vc->vc_x) 1572 if (nr > vc->vc_cols - vc->vc_x)
@@ -1576,7 +1576,7 @@ static void csi_P(struct vc_data *vc, unsigned int nr)
1576 delete_char(vc, nr); 1576 delete_char(vc, nr);
1577} 1577}
1578 1578
1579/* console_sem is held */ 1579/* console_lock is held */
1580static void csi_M(struct vc_data *vc, unsigned int nr) 1580static void csi_M(struct vc_data *vc, unsigned int nr)
1581{ 1581{
1582 if (nr > vc->vc_rows - vc->vc_y) 1582 if (nr > vc->vc_rows - vc->vc_y)
@@ -1587,7 +1587,7 @@ static void csi_M(struct vc_data *vc, unsigned int nr)
1587 vc->vc_need_wrap = 0; 1587 vc->vc_need_wrap = 0;
1588} 1588}
1589 1589
1590/* console_sem is held (except via vc_init->reset_terminal */ 1590/* console_lock is held (except via vc_init->reset_terminal */
1591static void save_cur(struct vc_data *vc) 1591static void save_cur(struct vc_data *vc)
1592{ 1592{
1593 vc->vc_saved_x = vc->vc_x; 1593 vc->vc_saved_x = vc->vc_x;
@@ -1603,7 +1603,7 @@ static void save_cur(struct vc_data *vc)
1603 vc->vc_saved_G1 = vc->vc_G1_charset; 1603 vc->vc_saved_G1 = vc->vc_G1_charset;
1604} 1604}
1605 1605
1606/* console_sem is held */ 1606/* console_lock is held */
1607static void restore_cur(struct vc_data *vc) 1607static void restore_cur(struct vc_data *vc)
1608{ 1608{
1609 gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y); 1609 gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y);
@@ -1625,7 +1625,7 @@ enum { ESnormal, ESesc, ESsquare, ESgetpars, ESgotpars, ESfunckey,
1625 EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd, 1625 EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
1626 ESpalette }; 1626 ESpalette };
1627 1627
1628/* console_sem is held (except via vc_init()) */ 1628/* console_lock is held (except via vc_init()) */
1629static void reset_terminal(struct vc_data *vc, int do_clear) 1629static void reset_terminal(struct vc_data *vc, int do_clear)
1630{ 1630{
1631 vc->vc_top = 0; 1631 vc->vc_top = 0;
@@ -1685,7 +1685,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
1685 csi_J(vc, 2); 1685 csi_J(vc, 2);
1686} 1686}
1687 1687
1688/* console_sem is held */ 1688/* console_lock is held */
1689static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) 1689static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1690{ 1690{
1691 /* 1691 /*
@@ -2119,7 +2119,7 @@ static int is_double_width(uint32_t ucs)
2119 return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1); 2119 return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
2120} 2120}
2121 2121
2122/* acquires console_sem */ 2122/* acquires console_lock */
2123static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count) 2123static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
2124{ 2124{
2125#ifdef VT_BUF_VRAM_ONLY 2125#ifdef VT_BUF_VRAM_ONLY
@@ -2147,11 +2147,11 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
2147 2147
2148 might_sleep(); 2148 might_sleep();
2149 2149
2150 acquire_console_sem(); 2150 console_lock();
2151 vc = tty->driver_data; 2151 vc = tty->driver_data;
2152 if (vc == NULL) { 2152 if (vc == NULL) {
2153 printk(KERN_ERR "vt: argh, driver_data is NULL !\n"); 2153 printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
2154 release_console_sem(); 2154 console_unlock();
2155 return 0; 2155 return 0;
2156 } 2156 }
2157 2157
@@ -2159,7 +2159,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
2159 if (!vc_cons_allocated(currcons)) { 2159 if (!vc_cons_allocated(currcons)) {
2160 /* could this happen? */ 2160 /* could this happen? */
2161 printk_once("con_write: tty %d not allocated\n", currcons+1); 2161 printk_once("con_write: tty %d not allocated\n", currcons+1);
2162 release_console_sem(); 2162 console_unlock();
2163 return 0; 2163 return 0;
2164 } 2164 }
2165 2165
@@ -2375,7 +2375,7 @@ rescan_last_byte:
2375 } 2375 }
2376 FLUSH 2376 FLUSH
2377 console_conditional_schedule(); 2377 console_conditional_schedule();
2378 release_console_sem(); 2378 console_unlock();
2379 notify_update(vc); 2379 notify_update(vc);
2380 return n; 2380 return n;
2381#undef FLUSH 2381#undef FLUSH
@@ -2388,11 +2388,11 @@ rescan_last_byte:
2388 * us to do the switches asynchronously (needed when we want 2388 * us to do the switches asynchronously (needed when we want
2389 * to switch due to a keyboard interrupt). Synchronization 2389 * to switch due to a keyboard interrupt). Synchronization
2390 * with other console code and prevention of re-entrancy is 2390 * with other console code and prevention of re-entrancy is
2391 * ensured with console_sem. 2391 * ensured with console_lock.
2392 */ 2392 */
2393static void console_callback(struct work_struct *ignored) 2393static void console_callback(struct work_struct *ignored)
2394{ 2394{
2395 acquire_console_sem(); 2395 console_lock();
2396 2396
2397 if (want_console >= 0) { 2397 if (want_console >= 0) {
2398 if (want_console != fg_console && 2398 if (want_console != fg_console &&
@@ -2422,7 +2422,7 @@ static void console_callback(struct work_struct *ignored)
2422 } 2422 }
2423 notify_update(vc_cons[fg_console].d); 2423 notify_update(vc_cons[fg_console].d);
2424 2424
2425 release_console_sem(); 2425 console_unlock();
2426} 2426}
2427 2427
2428int set_console(int nr) 2428int set_console(int nr)
@@ -2603,7 +2603,7 @@ static struct console vt_console_driver = {
2603 */ 2603 */
2604 2604
2605/* 2605/*
2606 * Generally a bit racy with respect to console_sem(). 2606 * Generally a bit racy with respect to console_lock();.
2607 * 2607 *
2608 * There are some functions which don't need it. 2608 * There are some functions which don't need it.
2609 * 2609 *
@@ -2629,17 +2629,17 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2629 switch (type) 2629 switch (type)
2630 { 2630 {
2631 case TIOCL_SETSEL: 2631 case TIOCL_SETSEL:
2632 acquire_console_sem(); 2632 console_lock();
2633 ret = set_selection((struct tiocl_selection __user *)(p+1), tty); 2633 ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
2634 release_console_sem(); 2634 console_unlock();
2635 break; 2635 break;
2636 case TIOCL_PASTESEL: 2636 case TIOCL_PASTESEL:
2637 ret = paste_selection(tty); 2637 ret = paste_selection(tty);
2638 break; 2638 break;
2639 case TIOCL_UNBLANKSCREEN: 2639 case TIOCL_UNBLANKSCREEN:
2640 acquire_console_sem(); 2640 console_lock();
2641 unblank_screen(); 2641 unblank_screen();
2642 release_console_sem(); 2642 console_unlock();
2643 break; 2643 break;
2644 case TIOCL_SELLOADLUT: 2644 case TIOCL_SELLOADLUT:
2645 ret = sel_loadlut(p); 2645 ret = sel_loadlut(p);
@@ -2688,10 +2688,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2688 } 2688 }
2689 break; 2689 break;
2690 case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */ 2690 case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */
2691 acquire_console_sem(); 2691 console_lock();
2692 ignore_poke = 1; 2692 ignore_poke = 1;
2693 do_blank_screen(0); 2693 do_blank_screen(0);
2694 release_console_sem(); 2694 console_unlock();
2695 break; 2695 break;
2696 case TIOCL_BLANKEDSCREEN: 2696 case TIOCL_BLANKEDSCREEN:
2697 ret = console_blanked; 2697 ret = console_blanked;
@@ -2790,11 +2790,11 @@ static void con_flush_chars(struct tty_struct *tty)
2790 return; 2790 return;
2791 2791
2792 /* if we race with con_close(), vt may be null */ 2792 /* if we race with con_close(), vt may be null */
2793 acquire_console_sem(); 2793 console_lock();
2794 vc = tty->driver_data; 2794 vc = tty->driver_data;
2795 if (vc) 2795 if (vc)
2796 set_cursor(vc); 2796 set_cursor(vc);
2797 release_console_sem(); 2797 console_unlock();
2798} 2798}
2799 2799
2800/* 2800/*
@@ -2805,7 +2805,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2805 unsigned int currcons = tty->index; 2805 unsigned int currcons = tty->index;
2806 int ret = 0; 2806 int ret = 0;
2807 2807
2808 acquire_console_sem(); 2808 console_lock();
2809 if (tty->driver_data == NULL) { 2809 if (tty->driver_data == NULL) {
2810 ret = vc_allocate(currcons); 2810 ret = vc_allocate(currcons);
2811 if (ret == 0) { 2811 if (ret == 0) {
@@ -2813,7 +2813,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2813 2813
2814 /* Still being freed */ 2814 /* Still being freed */
2815 if (vc->port.tty) { 2815 if (vc->port.tty) {
2816 release_console_sem(); 2816 console_unlock();
2817 return -ERESTARTSYS; 2817 return -ERESTARTSYS;
2818 } 2818 }
2819 tty->driver_data = vc; 2819 tty->driver_data = vc;
@@ -2827,11 +2827,11 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2827 tty->termios->c_iflag |= IUTF8; 2827 tty->termios->c_iflag |= IUTF8;
2828 else 2828 else
2829 tty->termios->c_iflag &= ~IUTF8; 2829 tty->termios->c_iflag &= ~IUTF8;
2830 release_console_sem(); 2830 console_unlock();
2831 return ret; 2831 return ret;
2832 } 2832 }
2833 } 2833 }
2834 release_console_sem(); 2834 console_unlock();
2835 return ret; 2835 return ret;
2836} 2836}
2837 2837
@@ -2844,9 +2844,9 @@ static void con_shutdown(struct tty_struct *tty)
2844{ 2844{
2845 struct vc_data *vc = tty->driver_data; 2845 struct vc_data *vc = tty->driver_data;
2846 BUG_ON(vc == NULL); 2846 BUG_ON(vc == NULL);
2847 acquire_console_sem(); 2847 console_lock();
2848 vc->port.tty = NULL; 2848 vc->port.tty = NULL;
2849 release_console_sem(); 2849 console_unlock();
2850 tty_shutdown(tty); 2850 tty_shutdown(tty);
2851} 2851}
2852 2852
@@ -2893,13 +2893,13 @@ static int __init con_init(void)
2893 struct vc_data *vc; 2893 struct vc_data *vc;
2894 unsigned int currcons = 0, i; 2894 unsigned int currcons = 0, i;
2895 2895
2896 acquire_console_sem(); 2896 console_lock();
2897 2897
2898 if (conswitchp) 2898 if (conswitchp)
2899 display_desc = conswitchp->con_startup(); 2899 display_desc = conswitchp->con_startup();
2900 if (!display_desc) { 2900 if (!display_desc) {
2901 fg_console = 0; 2901 fg_console = 0;
2902 release_console_sem(); 2902 console_unlock();
2903 return 0; 2903 return 0;
2904 } 2904 }
2905 2905
@@ -2946,7 +2946,7 @@ static int __init con_init(void)
2946 printable = 1; 2946 printable = 1;
2947 printk("\n"); 2947 printk("\n");
2948 2948
2949 release_console_sem(); 2949 console_unlock();
2950 2950
2951#ifdef CONFIG_VT_CONSOLE 2951#ifdef CONFIG_VT_CONSOLE
2952 register_console(&vt_console_driver); 2952 register_console(&vt_console_driver);
@@ -2994,7 +2994,7 @@ int __init vty_init(const struct file_operations *console_fops)
2994 if (IS_ERR(tty0dev)) 2994 if (IS_ERR(tty0dev))
2995 tty0dev = NULL; 2995 tty0dev = NULL;
2996 else 2996 else
2997 device_create_file(tty0dev, &dev_attr_active); 2997 WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0);
2998 2998
2999 vcs_init(); 2999 vcs_init();
3000 3000
@@ -3037,7 +3037,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
3037 if (!try_module_get(owner)) 3037 if (!try_module_get(owner))
3038 return -ENODEV; 3038 return -ENODEV;
3039 3039
3040 acquire_console_sem(); 3040 console_lock();
3041 3041
3042 /* check if driver is registered */ 3042 /* check if driver is registered */
3043 for (i = 0; i < MAX_NR_CON_DRIVER; i++) { 3043 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3122,7 +3122,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
3122 3122
3123 retval = 0; 3123 retval = 0;
3124err: 3124err:
3125 release_console_sem(); 3125 console_unlock();
3126 module_put(owner); 3126 module_put(owner);
3127 return retval; 3127 return retval;
3128}; 3128};
@@ -3171,7 +3171,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3171 if (!try_module_get(owner)) 3171 if (!try_module_get(owner))
3172 return -ENODEV; 3172 return -ENODEV;
3173 3173
3174 acquire_console_sem(); 3174 console_lock();
3175 3175
3176 /* check if driver is registered and if it is unbindable */ 3176 /* check if driver is registered and if it is unbindable */
3177 for (i = 0; i < MAX_NR_CON_DRIVER; i++) { 3177 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3185,7 +3185,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3185 } 3185 }
3186 3186
3187 if (retval) { 3187 if (retval) {
3188 release_console_sem(); 3188 console_unlock();
3189 goto err; 3189 goto err;
3190 } 3190 }
3191 3191
@@ -3204,12 +3204,12 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3204 } 3204 }
3205 3205
3206 if (retval) { 3206 if (retval) {
3207 release_console_sem(); 3207 console_unlock();
3208 goto err; 3208 goto err;
3209 } 3209 }
3210 3210
3211 if (!con_is_bound(csw)) { 3211 if (!con_is_bound(csw)) {
3212 release_console_sem(); 3212 console_unlock();
3213 goto err; 3213 goto err;
3214 } 3214 }
3215 3215
@@ -3238,7 +3238,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3238 if (!con_is_bound(csw)) 3238 if (!con_is_bound(csw))
3239 con_driver->flag &= ~CON_DRIVER_FLAG_INIT; 3239 con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
3240 3240
3241 release_console_sem(); 3241 console_unlock();
3242 /* ignore return value, binding should not fail */ 3242 /* ignore return value, binding should not fail */
3243 bind_con_driver(defcsw, first, last, deflt); 3243 bind_con_driver(defcsw, first, last, deflt);
3244err: 3244err:
@@ -3538,14 +3538,14 @@ int register_con_driver(const struct consw *csw, int first, int last)
3538 if (!try_module_get(owner)) 3538 if (!try_module_get(owner))
3539 return -ENODEV; 3539 return -ENODEV;
3540 3540
3541 acquire_console_sem(); 3541 console_lock();
3542 3542
3543 for (i = 0; i < MAX_NR_CON_DRIVER; i++) { 3543 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
3544 con_driver = &registered_con_driver[i]; 3544 con_driver = &registered_con_driver[i];
3545 3545
3546 /* already registered */ 3546 /* already registered */
3547 if (con_driver->con == csw) 3547 if (con_driver->con == csw)
3548 retval = -EINVAL; 3548 retval = -EBUSY;
3549 } 3549 }
3550 3550
3551 if (retval) 3551 if (retval)
@@ -3592,7 +3592,7 @@ int register_con_driver(const struct consw *csw, int first, int last)
3592 } 3592 }
3593 3593
3594err: 3594err:
3595 release_console_sem(); 3595 console_unlock();
3596 module_put(owner); 3596 module_put(owner);
3597 return retval; 3597 return retval;
3598} 3598}
@@ -3613,7 +3613,7 @@ int unregister_con_driver(const struct consw *csw)
3613{ 3613{
3614 int i, retval = -ENODEV; 3614 int i, retval = -ENODEV;
3615 3615
3616 acquire_console_sem(); 3616 console_lock();
3617 3617
3618 /* cannot unregister a bound driver */ 3618 /* cannot unregister a bound driver */
3619 if (con_is_bound(csw)) 3619 if (con_is_bound(csw))
@@ -3639,7 +3639,7 @@ int unregister_con_driver(const struct consw *csw)
3639 } 3639 }
3640 } 3640 }
3641err: 3641err:
3642 release_console_sem(); 3642 console_unlock();
3643 return retval; 3643 return retval;
3644} 3644}
3645EXPORT_SYMBOL(unregister_con_driver); 3645EXPORT_SYMBOL(unregister_con_driver);
@@ -3656,7 +3656,12 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
3656 int err; 3656 int err;
3657 3657
3658 err = register_con_driver(csw, first, last); 3658 err = register_con_driver(csw, first, last);
3659 3659 /* if we get an busy error we still want to bind the console driver
3660 * and return success, as we may have unbound the console driver
3661  * but not unregistered it.
3662 */
3663 if (err == -EBUSY)
3664 err = 0;
3660 if (!err) 3665 if (!err)
3661 bind_con_driver(csw, first, last, deflt); 3666 bind_con_driver(csw, first, last, deflt);
3662 3667
@@ -3934,9 +3939,9 @@ int con_set_cmap(unsigned char __user *arg)
3934{ 3939{
3935 int rc; 3940 int rc;
3936 3941
3937 acquire_console_sem(); 3942 console_lock();
3938 rc = set_get_cmap (arg,1); 3943 rc = set_get_cmap (arg,1);
3939 release_console_sem(); 3944 console_unlock();
3940 3945
3941 return rc; 3946 return rc;
3942} 3947}
@@ -3945,9 +3950,9 @@ int con_get_cmap(unsigned char __user *arg)
3945{ 3950{
3946 int rc; 3951 int rc;
3947 3952
3948 acquire_console_sem(); 3953 console_lock();
3949 rc = set_get_cmap (arg,0); 3954 rc = set_get_cmap (arg,0);
3950 release_console_sem(); 3955 console_unlock();
3951 3956
3952 return rc; 3957 return rc;
3953} 3958}
@@ -3994,12 +3999,12 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
3994 } else 3999 } else
3995 font.data = NULL; 4000 font.data = NULL;
3996 4001
3997 acquire_console_sem(); 4002 console_lock();
3998 if (vc->vc_sw->con_font_get) 4003 if (vc->vc_sw->con_font_get)
3999 rc = vc->vc_sw->con_font_get(vc, &font); 4004 rc = vc->vc_sw->con_font_get(vc, &font);
4000 else 4005 else
4001 rc = -ENOSYS; 4006 rc = -ENOSYS;
4002 release_console_sem(); 4007 console_unlock();
4003 4008
4004 if (rc) 4009 if (rc)
4005 goto out; 4010 goto out;
@@ -4076,12 +4081,12 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
4076 font.data = memdup_user(op->data, size); 4081 font.data = memdup_user(op->data, size);
4077 if (IS_ERR(font.data)) 4082 if (IS_ERR(font.data))
4078 return PTR_ERR(font.data); 4083 return PTR_ERR(font.data);
4079 acquire_console_sem(); 4084 console_lock();
4080 if (vc->vc_sw->con_font_set) 4085 if (vc->vc_sw->con_font_set)
4081 rc = vc->vc_sw->con_font_set(vc, &font, op->flags); 4086 rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
4082 else 4087 else
4083 rc = -ENOSYS; 4088 rc = -ENOSYS;
4084 release_console_sem(); 4089 console_unlock();
4085 kfree(font.data); 4090 kfree(font.data);
4086 return rc; 4091 return rc;
4087} 4092}
@@ -4103,12 +4108,12 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
4103 else 4108 else
4104 name[MAX_FONT_NAME - 1] = 0; 4109 name[MAX_FONT_NAME - 1] = 0;
4105 4110
4106 acquire_console_sem(); 4111 console_lock();
4107 if (vc->vc_sw->con_font_default) 4112 if (vc->vc_sw->con_font_default)
4108 rc = vc->vc_sw->con_font_default(vc, &font, s); 4113 rc = vc->vc_sw->con_font_default(vc, &font, s);
4109 else 4114 else
4110 rc = -ENOSYS; 4115 rc = -ENOSYS;
4111 release_console_sem(); 4116 console_unlock();
4112 if (!rc) { 4117 if (!rc) {
4113 op->width = font.width; 4118 op->width = font.width;
4114 op->height = font.height; 4119 op->height = font.height;
@@ -4124,7 +4129,7 @@ static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
4124 if (vc->vc_mode != KD_TEXT) 4129 if (vc->vc_mode != KD_TEXT)
4125 return -EINVAL; 4130 return -EINVAL;
4126 4131
4127 acquire_console_sem(); 4132 console_lock();
4128 if (!vc->vc_sw->con_font_copy) 4133 if (!vc->vc_sw->con_font_copy)
4129 rc = -ENOSYS; 4134 rc = -ENOSYS;
4130 else if (con < 0 || !vc_cons_allocated(con)) 4135 else if (con < 0 || !vc_cons_allocated(con))
@@ -4133,7 +4138,7 @@ static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
4133 rc = 0; 4138 rc = 0;
4134 else 4139 else
4135 rc = vc->vc_sw->con_font_copy(vc, con); 4140 rc = vc->vc_sw->con_font_copy(vc, con);
4136 release_console_sem(); 4141 console_unlock();
4137 return rc; 4142 return rc;
4138} 4143}
4139 4144
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 6b68a0fb4611..1235ebda6e1c 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -649,12 +649,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
649 /* 649 /*
650 * explicitly blank/unblank the screen if switching modes 650 * explicitly blank/unblank the screen if switching modes
651 */ 651 */
652 acquire_console_sem(); 652 console_lock();
653 if (arg == KD_TEXT) 653 if (arg == KD_TEXT)
654 do_unblank_screen(1); 654 do_unblank_screen(1);
655 else 655 else
656 do_blank_screen(1); 656 do_blank_screen(1);
657 release_console_sem(); 657 console_unlock();
658 break; 658 break;
659 659
660 case KDGETMODE: 660 case KDGETMODE:
@@ -893,7 +893,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
893 ret = -EINVAL; 893 ret = -EINVAL;
894 goto out; 894 goto out;
895 } 895 }
896 acquire_console_sem(); 896 console_lock();
897 vc->vt_mode = tmp; 897 vc->vt_mode = tmp;
898 /* the frsig is ignored, so we set it to 0 */ 898 /* the frsig is ignored, so we set it to 0 */
899 vc->vt_mode.frsig = 0; 899 vc->vt_mode.frsig = 0;
@@ -901,7 +901,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
901 vc->vt_pid = get_pid(task_pid(current)); 901 vc->vt_pid = get_pid(task_pid(current));
902 /* no switch is required -- saw@shade.msu.ru */ 902 /* no switch is required -- saw@shade.msu.ru */
903 vc->vt_newvt = -1; 903 vc->vt_newvt = -1;
904 release_console_sem(); 904 console_unlock();
905 break; 905 break;
906 } 906 }
907 907
@@ -910,9 +910,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
910 struct vt_mode tmp; 910 struct vt_mode tmp;
911 int rc; 911 int rc;
912 912
913 acquire_console_sem(); 913 console_lock();
914 memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode)); 914 memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode));
915 release_console_sem(); 915 console_unlock();
916 916
917 rc = copy_to_user(up, &tmp, sizeof(struct vt_mode)); 917 rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
918 if (rc) 918 if (rc)
@@ -965,9 +965,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
965 ret = -ENXIO; 965 ret = -ENXIO;
966 else { 966 else {
967 arg--; 967 arg--;
968 acquire_console_sem(); 968 console_lock();
969 ret = vc_allocate(arg); 969 ret = vc_allocate(arg);
970 release_console_sem(); 970 console_unlock();
971 if (ret) 971 if (ret)
972 break; 972 break;
973 set_console(arg); 973 set_console(arg);
@@ -990,7 +990,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
990 ret = -ENXIO; 990 ret = -ENXIO;
991 else { 991 else {
992 vsa.console--; 992 vsa.console--;
993 acquire_console_sem(); 993 console_lock();
994 ret = vc_allocate(vsa.console); 994 ret = vc_allocate(vsa.console);
995 if (ret == 0) { 995 if (ret == 0) {
996 struct vc_data *nvc; 996 struct vc_data *nvc;
@@ -1003,7 +1003,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1003 put_pid(nvc->vt_pid); 1003 put_pid(nvc->vt_pid);
1004 nvc->vt_pid = get_pid(task_pid(current)); 1004 nvc->vt_pid = get_pid(task_pid(current));
1005 } 1005 }
1006 release_console_sem(); 1006 console_unlock();
1007 if (ret) 1007 if (ret)
1008 break; 1008 break;
1009 /* Commence switch and lock */ 1009 /* Commence switch and lock */
@@ -1044,7 +1044,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1044 /* 1044 /*
1045 * Switching-from response 1045 * Switching-from response
1046 */ 1046 */
1047 acquire_console_sem(); 1047 console_lock();
1048 if (vc->vt_newvt >= 0) { 1048 if (vc->vt_newvt >= 0) {
1049 if (arg == 0) 1049 if (arg == 0)
1050 /* 1050 /*
@@ -1063,7 +1063,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1063 vc->vt_newvt = -1; 1063 vc->vt_newvt = -1;
1064 ret = vc_allocate(newvt); 1064 ret = vc_allocate(newvt);
1065 if (ret) { 1065 if (ret) {
1066 release_console_sem(); 1066 console_unlock();
1067 break; 1067 break;
1068 } 1068 }
1069 /* 1069 /*
@@ -1083,7 +1083,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1083 if (arg != VT_ACKACQ) 1083 if (arg != VT_ACKACQ)
1084 ret = -EINVAL; 1084 ret = -EINVAL;
1085 } 1085 }
1086 release_console_sem(); 1086 console_unlock();
1087 break; 1087 break;
1088 1088
1089 /* 1089 /*
@@ -1096,20 +1096,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1096 } 1096 }
1097 if (arg == 0) { 1097 if (arg == 0) {
1098 /* deallocate all unused consoles, but leave 0 */ 1098 /* deallocate all unused consoles, but leave 0 */
1099 acquire_console_sem(); 1099 console_lock();
1100 for (i=1; i<MAX_NR_CONSOLES; i++) 1100 for (i=1; i<MAX_NR_CONSOLES; i++)
1101 if (! VT_BUSY(i)) 1101 if (! VT_BUSY(i))
1102 vc_deallocate(i); 1102 vc_deallocate(i);
1103 release_console_sem(); 1103 console_unlock();
1104 } else { 1104 } else {
1105 /* deallocate a single console, if possible */ 1105 /* deallocate a single console, if possible */
1106 arg--; 1106 arg--;
1107 if (VT_BUSY(arg)) 1107 if (VT_BUSY(arg))
1108 ret = -EBUSY; 1108 ret = -EBUSY;
1109 else if (arg) { /* leave 0 */ 1109 else if (arg) { /* leave 0 */
1110 acquire_console_sem(); 1110 console_lock();
1111 vc_deallocate(arg); 1111 vc_deallocate(arg);
1112 release_console_sem(); 1112 console_unlock();
1113 } 1113 }
1114 } 1114 }
1115 break; 1115 break;
@@ -1126,7 +1126,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1126 get_user(cc, &vtsizes->v_cols)) 1126 get_user(cc, &vtsizes->v_cols))
1127 ret = -EFAULT; 1127 ret = -EFAULT;
1128 else { 1128 else {
1129 acquire_console_sem(); 1129 console_lock();
1130 for (i = 0; i < MAX_NR_CONSOLES; i++) { 1130 for (i = 0; i < MAX_NR_CONSOLES; i++) {
1131 vc = vc_cons[i].d; 1131 vc = vc_cons[i].d;
1132 1132
@@ -1135,7 +1135,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1135 vc_resize(vc_cons[i].d, cc, ll); 1135 vc_resize(vc_cons[i].d, cc, ll);
1136 } 1136 }
1137 } 1137 }
1138 release_console_sem(); 1138 console_unlock();
1139 } 1139 }
1140 break; 1140 break;
1141 } 1141 }
@@ -1187,14 +1187,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1187 for (i = 0; i < MAX_NR_CONSOLES; i++) { 1187 for (i = 0; i < MAX_NR_CONSOLES; i++) {
1188 if (!vc_cons[i].d) 1188 if (!vc_cons[i].d)
1189 continue; 1189 continue;
1190 acquire_console_sem(); 1190 console_lock();
1191 if (vlin) 1191 if (vlin)
1192 vc_cons[i].d->vc_scan_lines = vlin; 1192 vc_cons[i].d->vc_scan_lines = vlin;
1193 if (clin) 1193 if (clin)
1194 vc_cons[i].d->vc_font.height = clin; 1194 vc_cons[i].d->vc_font.height = clin;
1195 vc_cons[i].d->vc_resize_user = 1; 1195 vc_cons[i].d->vc_resize_user = 1;
1196 vc_resize(vc_cons[i].d, cc, ll); 1196 vc_resize(vc_cons[i].d, cc, ll);
1197 release_console_sem(); 1197 console_unlock();
1198 } 1198 }
1199 break; 1199 break;
1200 } 1200 }
@@ -1367,7 +1367,7 @@ void vc_SAK(struct work_struct *work)
1367 struct vc_data *vc; 1367 struct vc_data *vc;
1368 struct tty_struct *tty; 1368 struct tty_struct *tty;
1369 1369
1370 acquire_console_sem(); 1370 console_lock();
1371 vc = vc_con->d; 1371 vc = vc_con->d;
1372 if (vc) { 1372 if (vc) {
1373 tty = vc->port.tty; 1373 tty = vc->port.tty;
@@ -1379,7 +1379,7 @@ void vc_SAK(struct work_struct *work)
1379 __do_SAK(tty); 1379 __do_SAK(tty);
1380 reset_vc(vc); 1380 reset_vc(vc);
1381 } 1381 }
1382 release_console_sem(); 1382 console_unlock();
1383} 1383}
1384 1384
1385#ifdef CONFIG_COMPAT 1385#ifdef CONFIG_COMPAT
@@ -1737,10 +1737,10 @@ int vt_move_to_console(unsigned int vt, int alloc)
1737{ 1737{
1738 int prev; 1738 int prev;
1739 1739
1740 acquire_console_sem(); 1740 console_lock();
1741 /* Graphics mode - up to X */ 1741 /* Graphics mode - up to X */
1742 if (disable_vt_switch) { 1742 if (disable_vt_switch) {
1743 release_console_sem(); 1743 console_unlock();
1744 return 0; 1744 return 0;
1745 } 1745 }
1746 prev = fg_console; 1746 prev = fg_console;
@@ -1748,7 +1748,7 @@ int vt_move_to_console(unsigned int vt, int alloc)
1748 if (alloc && vc_allocate(vt)) { 1748 if (alloc && vc_allocate(vt)) {
1749 /* we can't have a free VC for now. Too bad, 1749 /* we can't have a free VC for now. Too bad,
1750 * we don't want to mess the screen for now. */ 1750 * we don't want to mess the screen for now. */
1751 release_console_sem(); 1751 console_unlock();
1752 return -ENOSPC; 1752 return -ENOSPC;
1753 } 1753 }
1754 1754
@@ -1758,10 +1758,10 @@ int vt_move_to_console(unsigned int vt, int alloc)
1758 * Let the calling function know so it can decide 1758 * Let the calling function know so it can decide
1759 * what to do. 1759 * what to do.
1760 */ 1760 */
1761 release_console_sem(); 1761 console_unlock();
1762 return -EIO; 1762 return -EIO;
1763 } 1763 }
1764 release_console_sem(); 1764 console_unlock();
1765 tty_lock(); 1765 tty_lock();
1766 if (vt_waitactive(vt + 1)) { 1766 if (vt_waitactive(vt + 1)) {
1767 pr_debug("Suspend: Can't switch VCs."); 1767 pr_debug("Suspend: Can't switch VCs.");
@@ -1781,8 +1781,8 @@ int vt_move_to_console(unsigned int vt, int alloc)
1781 */ 1781 */
1782void pm_set_vt_switch(int do_switch) 1782void pm_set_vt_switch(int do_switch)
1783{ 1783{
1784 acquire_console_sem(); 1784 console_lock();
1785 disable_vt_switch = !do_switch; 1785 disable_vt_switch = !do_switch;
1786 release_console_sem(); 1786 console_unlock();
1787} 1787}
1788EXPORT_SYMBOL(pm_set_vt_switch); 1788EXPORT_SYMBOL(pm_set_vt_switch);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d6ede989ff22..4ab49d4eebf4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = {
1607 { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ 1607 { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
1608 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ 1608 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
1609 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ 1609 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
1610 { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
1610 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ 1611 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
1611 1612
1612 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ 1613 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6ee4451bfe2d..47085e5879ab 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -342,7 +342,7 @@ static ssize_t wdm_write
342 goto outnp; 342 goto outnp;
343 } 343 }
344 344
345 if (!file->f_flags && O_NONBLOCK) 345 if (!(file->f_flags & O_NONBLOCK))
346 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE, 346 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
347 &desc->flags)); 347 &desc->flags));
348 else 348 else
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index bcc24779ba0e..18d02e32a3d5 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -123,9 +123,9 @@ config USB_OTG
123 123
124config USB_OTG_WHITELIST 124config USB_OTG_WHITELIST
125 bool "Rely on OTG Targeted Peripherals List" 125 bool "Rely on OTG Targeted Peripherals List"
126 depends on USB_OTG || EMBEDDED 126 depends on USB_OTG || EXPERT
127 default y if USB_OTG 127 default y if USB_OTG
128 default n if EMBEDDED 128 default n if EXPERT
129 help 129 help
130 If you say Y here, the "otg_whitelist.h" file will be used as a 130 If you say Y here, the "otg_whitelist.h" file will be used as a
131 product whitelist, so USB peripherals not listed there will be 131 product whitelist, so USB peripherals not listed there will be
@@ -141,7 +141,7 @@ config USB_OTG_WHITELIST
141 141
142config USB_OTG_BLACKLIST_HUB 142config USB_OTG_BLACKLIST_HUB
143 bool "Disable external hubs" 143 bool "Disable external hubs"
144 depends on USB_OTG || EMBEDDED 144 depends on USB_OTG || EXPERT
145 help 145 help
146 If you say Y here, then Linux will refuse to enumerate 146 If you say Y here, then Linux will refuse to enumerate
147 external hubs. OTG hosts are allowed to reduce hardware 147 external hubs. OTG hosts are allowed to reduce hardware
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 9da250563027..df502a98d0df 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -192,12 +192,12 @@ int usb_create_ep_devs(struct device *parent,
192 ep_dev->dev.parent = parent; 192 ep_dev->dev.parent = parent;
193 ep_dev->dev.release = ep_device_release; 193 ep_dev->dev.release = ep_device_release;
194 dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress); 194 dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
195 device_enable_async_suspend(&ep_dev->dev);
196 195
197 retval = device_register(&ep_dev->dev); 196 retval = device_register(&ep_dev->dev);
198 if (retval) 197 if (retval)
199 goto error_register; 198 goto error_register;
200 199
200 device_enable_async_suspend(&ep_dev->dev);
201 endpoint->ep_dev = ep_dev; 201 endpoint->ep_dev = ep_dev;
202 return retval; 202 return retval;
203 203
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b55d46070a25..f71e8e307e0f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -405,7 +405,12 @@ static int suspend_common(struct device *dev, bool do_wakeup)
405 return retval; 405 return retval;
406 } 406 }
407 407
408 synchronize_irq(pci_dev->irq); 408 /* If MSI-X is enabled, the driver will have synchronized all vectors
409 * in pci_suspend(). If MSI or legacy PCI is enabled, that will be
410 * synchronized here.
411 */
412 if (!hcd->msix_enabled)
413 synchronize_irq(pci_dev->irq);
409 414
410 /* Downstream ports from this root hub should already be quiesced, so 415 /* Downstream ports from this root hub should already be quiesced, so
411 * there will be no DMA activity. Now we can shut down the upstream 416 * there will be no DMA activity. Now we can shut down the upstream
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6a95017fa62b..e935f71d7a34 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1955,7 +1955,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
1955 1955
1956 dev_dbg(&rhdev->dev, "usb %s%s\n", 1956 dev_dbg(&rhdev->dev, "usb %s%s\n",
1957 (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); 1957 (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
1958 clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
1959 if (!hcd->driver->bus_resume) 1958 if (!hcd->driver->bus_resume)
1960 return -ENOENT; 1959 return -ENOENT;
1961 if (hcd->state == HC_STATE_RUNNING) 1960 if (hcd->state == HC_STATE_RUNNING)
@@ -1963,6 +1962,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
1963 1962
1964 hcd->state = HC_STATE_RESUMING; 1963 hcd->state = HC_STATE_RESUMING;
1965 status = hcd->driver->bus_resume(hcd); 1964 status = hcd->driver->bus_resume(hcd);
1965 clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
1966 if (status == 0) { 1966 if (status == 0) {
1967 /* TRSMRCY = 10 msec */ 1967 /* TRSMRCY = 10 msec */
1968 msleep(10); 1968 msleep(10);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b98efae6a1cf..d041c6826e43 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -676,6 +676,8 @@ static void hub_init_func3(struct work_struct *ws);
676static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) 676static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
677{ 677{
678 struct usb_device *hdev = hub->hdev; 678 struct usb_device *hdev = hub->hdev;
679 struct usb_hcd *hcd;
680 int ret;
679 int port1; 681 int port1;
680 int status; 682 int status;
681 bool need_debounce_delay = false; 683 bool need_debounce_delay = false;
@@ -714,6 +716,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
714 usb_autopm_get_interface_no_resume( 716 usb_autopm_get_interface_no_resume(
715 to_usb_interface(hub->intfdev)); 717 to_usb_interface(hub->intfdev));
716 return; /* Continues at init2: below */ 718 return; /* Continues at init2: below */
719 } else if (type == HUB_RESET_RESUME) {
720 /* The internal host controller state for the hub device
721 * may be gone after a host power loss on system resume.
722 * Update the device's info so the HW knows it's a hub.
723 */
724 hcd = bus_to_hcd(hdev->bus);
725 if (hcd->driver->update_hub_device) {
726 ret = hcd->driver->update_hub_device(hcd, hdev,
727 &hub->tt, GFP_NOIO);
728 if (ret < 0) {
729 dev_err(hub->intfdev, "Host not "
730 "accepting hub info "
731 "update.\n");
732 dev_err(hub->intfdev, "LS/FS devices "
733 "and hubs may not work "
734 "under this hub\n.");
735 }
736 }
737 hub_power_on(hub, true);
717 } else { 738 } else {
718 hub_power_on(hub, true); 739 hub_power_on(hub, true);
719 } 740 }
@@ -2732,6 +2753,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2732 udev->ttport = hdev->ttport; 2753 udev->ttport = hdev->ttport;
2733 } else if (udev->speed != USB_SPEED_HIGH 2754 } else if (udev->speed != USB_SPEED_HIGH
2734 && hdev->speed == USB_SPEED_HIGH) { 2755 && hdev->speed == USB_SPEED_HIGH) {
2756 if (!hub->tt.hub) {
2757 dev_err(&udev->dev, "parent hub has no TT\n");
2758 retval = -EINVAL;
2759 goto fail;
2760 }
2735 udev->tt = &hub->tt; 2761 udev->tt = &hub->tt;
2736 udev->ttport = port1; 2762 udev->ttport = port1;
2737 } 2763 }
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 1dc9739277b4..d50099675f28 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -509,7 +509,7 @@ config USB_LANGWELL
509 select USB_GADGET_SELECTED 509 select USB_GADGET_SELECTED
510 510
511config USB_GADGET_EG20T 511config USB_GADGET_EG20T
512 boolean "Intel EG20T(Topcliff) USB Device controller" 512 boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
513 depends on PCI 513 depends on PCI
514 select USB_GADGET_DUALSPEED 514 select USB_GADGET_DUALSPEED
515 help 515 help
@@ -525,6 +525,11 @@ config USB_GADGET_EG20T
525 This driver dose not support interrupt transfer or isochronous 525 This driver dose not support interrupt transfer or isochronous
526 transfer modes. 526 transfer modes.
527 527
528 This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is
529 for IVI(In-Vehicle Infotainment) use.
530 ML7213 is companion chip for Intel Atom E6xx series.
531 ML7213 is completely compatible for Intel EG20T PCH.
532
528config USB_EG20T 533config USB_EG20T
529 tristate 534 tristate
530 depends on USB_GADGET_EG20T 535 depends on USB_GADGET_EG20T
@@ -541,6 +546,8 @@ config USB_GADGET_CI13XXX_MSM
541 ci13xxx_udc core. 546 ci13xxx_udc core.
542 This driver depends on OTG driver for PHY initialization, 547 This driver depends on OTG driver for PHY initialization,
543 clock management, powering up VBUS, and power management. 548 clock management, powering up VBUS, and power management.
549 This driver is not supported on boards like trout which
550 has an external PHY.
544 551
545 Say "y" to link the driver statically, or "m" to build a 552 Say "y" to link the driver statically, or "m" to build a
546 dynamically linked module called "ci13xxx_msm" and force all 553 dynamically linked module called "ci13xxx_msm" and force all
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 31656a2b4ab4..a1c67ae1572a 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -76,10 +76,21 @@ static DEFINE_SPINLOCK(udc_lock);
76 76
77/* control endpoint description */ 77/* control endpoint description */
78static const struct usb_endpoint_descriptor 78static const struct usb_endpoint_descriptor
79ctrl_endpt_desc = { 79ctrl_endpt_out_desc = {
80 .bLength = USB_DT_ENDPOINT_SIZE, 80 .bLength = USB_DT_ENDPOINT_SIZE,
81 .bDescriptorType = USB_DT_ENDPOINT, 81 .bDescriptorType = USB_DT_ENDPOINT,
82 82
83 .bEndpointAddress = USB_DIR_OUT,
84 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
85 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
86};
87
88static const struct usb_endpoint_descriptor
89ctrl_endpt_in_desc = {
90 .bLength = USB_DT_ENDPOINT_SIZE,
91 .bDescriptorType = USB_DT_ENDPOINT,
92
93 .bEndpointAddress = USB_DIR_IN,
83 .bmAttributes = USB_ENDPOINT_XFER_CONTROL, 94 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
84 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX), 95 .wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
85}; 96};
@@ -265,10 +276,10 @@ static int hw_device_init(void __iomem *base)
265 hw_bank.size /= sizeof(u32); 276 hw_bank.size /= sizeof(u32);
266 277
267 reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN); 278 reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
268 if (reg == 0 || reg > ENDPT_MAX) 279 hw_ep_max = reg * 2; /* cache hw ENDPT_MAX */
269 return -ENODEV;
270 280
271 hw_ep_max = reg; /* cache hw ENDPT_MAX */ 281 if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
282 return -ENODEV;
272 283
273 /* setup lock mode ? */ 284 /* setup lock mode ? */
274 285
@@ -1197,16 +1208,17 @@ static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
1197 } 1208 }
1198 1209
1199 spin_lock_irqsave(udc->lock, flags); 1210 spin_lock_irqsave(udc->lock, flags);
1200 for (i = 0; i < hw_ep_max; i++) { 1211 for (i = 0; i < hw_ep_max/2; i++) {
1201 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i]; 1212 struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
1213 struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
1202 n += scnprintf(buf + n, PAGE_SIZE - n, 1214 n += scnprintf(buf + n, PAGE_SIZE - n,
1203 "EP=%02i: RX=%08X TX=%08X\n", 1215 "EP=%02i: RX=%08X TX=%08X\n",
1204 i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma); 1216 i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
1205 for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) { 1217 for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
1206 n += scnprintf(buf + n, PAGE_SIZE - n, 1218 n += scnprintf(buf + n, PAGE_SIZE - n,
1207 " %04X: %08X %08X\n", j, 1219 " %04X: %08X %08X\n", j,
1208 *((u32 *)mEp->qh[RX].ptr + j), 1220 *((u32 *)mEpRx->qh.ptr + j),
1209 *((u32 *)mEp->qh[TX].ptr + j)); 1221 *((u32 *)mEpTx->qh.ptr + j));
1210 } 1222 }
1211 } 1223 }
1212 spin_unlock_irqrestore(udc->lock, flags); 1224 spin_unlock_irqrestore(udc->lock, flags);
@@ -1293,7 +1305,7 @@ static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
1293 unsigned long flags; 1305 unsigned long flags;
1294 struct list_head *ptr = NULL; 1306 struct list_head *ptr = NULL;
1295 struct ci13xxx_req *req = NULL; 1307 struct ci13xxx_req *req = NULL;
1296 unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32); 1308 unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
1297 1309
1298 dbg_trace("[%s] %p\n", __func__, buf); 1310 dbg_trace("[%s] %p\n", __func__, buf);
1299 if (attr == NULL || buf == NULL) { 1311 if (attr == NULL || buf == NULL) {
@@ -1303,22 +1315,20 @@ static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
1303 1315
1304 spin_lock_irqsave(udc->lock, flags); 1316 spin_lock_irqsave(udc->lock, flags);
1305 for (i = 0; i < hw_ep_max; i++) 1317 for (i = 0; i < hw_ep_max; i++)
1306 for (k = RX; k <= TX; k++) 1318 list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
1307 list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue) 1319 {
1308 { 1320 req = list_entry(ptr, struct ci13xxx_req, queue);
1309 req = list_entry(ptr, 1321
1310 struct ci13xxx_req, queue); 1322 n += scnprintf(buf + n, PAGE_SIZE - n,
1323 "EP=%02i: TD=%08X %s\n",
1324 i % hw_ep_max/2, (u32)req->dma,
1325 ((i < hw_ep_max/2) ? "RX" : "TX"));
1311 1326
1327 for (j = 0; j < qSize; j++)
1312 n += scnprintf(buf + n, PAGE_SIZE - n, 1328 n += scnprintf(buf + n, PAGE_SIZE - n,
1313 "EP=%02i: TD=%08X %s\n", 1329 " %04X: %08X\n", j,
1314 i, (u32)req->dma, 1330 *((u32 *)req->ptr + j));
1315 ((k == RX) ? "RX" : "TX")); 1331 }
1316
1317 for (j = 0; j < qSize; j++)
1318 n += scnprintf(buf + n, PAGE_SIZE - n,
1319 " %04X: %08X\n", j,
1320 *((u32 *)req->ptr + j));
1321 }
1322 spin_unlock_irqrestore(udc->lock, flags); 1332 spin_unlock_irqrestore(udc->lock, flags);
1323 1333
1324 return n; 1334 return n;
@@ -1467,12 +1477,12 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1467 * At this point it's guaranteed exclusive access to qhead 1477 * At this point it's guaranteed exclusive access to qhead
1468 * (endpt is not primed) so it's no need to use tripwire 1478 * (endpt is not primed) so it's no need to use tripwire
1469 */ 1479 */
1470 mEp->qh[mEp->dir].ptr->td.next = mReq->dma; /* TERMINATE = 0 */ 1480 mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */
1471 mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS; /* clear status */ 1481 mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */
1472 if (mReq->req.zero == 0) 1482 if (mReq->req.zero == 0)
1473 mEp->qh[mEp->dir].ptr->cap |= QH_ZLT; 1483 mEp->qh.ptr->cap |= QH_ZLT;
1474 else 1484 else
1475 mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT; 1485 mEp->qh.ptr->cap &= ~QH_ZLT;
1476 1486
1477 wmb(); /* synchronize before ep prime */ 1487 wmb(); /* synchronize before ep prime */
1478 1488
@@ -1542,11 +1552,11 @@ __acquires(mEp->lock)
1542 1552
1543 hw_ep_flush(mEp->num, mEp->dir); 1553 hw_ep_flush(mEp->num, mEp->dir);
1544 1554
1545 while (!list_empty(&mEp->qh[mEp->dir].queue)) { 1555 while (!list_empty(&mEp->qh.queue)) {
1546 1556
1547 /* pop oldest request */ 1557 /* pop oldest request */
1548 struct ci13xxx_req *mReq = \ 1558 struct ci13xxx_req *mReq = \
1549 list_entry(mEp->qh[mEp->dir].queue.next, 1559 list_entry(mEp->qh.queue.next,
1550 struct ci13xxx_req, queue); 1560 struct ci13xxx_req, queue);
1551 list_del_init(&mReq->queue); 1561 list_del_init(&mReq->queue);
1552 mReq->req.status = -ESHUTDOWN; 1562 mReq->req.status = -ESHUTDOWN;
@@ -1571,8 +1581,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
1571{ 1581{
1572 struct usb_ep *ep; 1582 struct usb_ep *ep;
1573 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget); 1583 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1574 struct ci13xxx_ep *mEp = container_of(gadget->ep0,
1575 struct ci13xxx_ep, ep);
1576 1584
1577 trace("%p", gadget); 1585 trace("%p", gadget);
1578 1586
@@ -1583,7 +1591,8 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
1583 gadget_for_each_ep(ep, gadget) { 1591 gadget_for_each_ep(ep, gadget) {
1584 usb_ep_fifo_flush(ep); 1592 usb_ep_fifo_flush(ep);
1585 } 1593 }
1586 usb_ep_fifo_flush(gadget->ep0); 1594 usb_ep_fifo_flush(&udc->ep0out.ep);
1595 usb_ep_fifo_flush(&udc->ep0in.ep);
1587 1596
1588 udc->driver->disconnect(gadget); 1597 udc->driver->disconnect(gadget);
1589 1598
@@ -1591,11 +1600,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
1591 gadget_for_each_ep(ep, gadget) { 1600 gadget_for_each_ep(ep, gadget) {
1592 usb_ep_disable(ep); 1601 usb_ep_disable(ep);
1593 } 1602 }
1594 usb_ep_disable(gadget->ep0); 1603 usb_ep_disable(&udc->ep0out.ep);
1604 usb_ep_disable(&udc->ep0in.ep);
1595 1605
1596 if (mEp->status != NULL) { 1606 if (udc->status != NULL) {
1597 usb_ep_free_request(gadget->ep0, mEp->status); 1607 usb_ep_free_request(&udc->ep0in.ep, udc->status);
1598 mEp->status = NULL; 1608 udc->status = NULL;
1599 } 1609 }
1600 1610
1601 return 0; 1611 return 0;
@@ -1614,7 +1624,6 @@ static void isr_reset_handler(struct ci13xxx *udc)
1614__releases(udc->lock) 1624__releases(udc->lock)
1615__acquires(udc->lock) 1625__acquires(udc->lock)
1616{ 1626{
1617 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
1618 int retval; 1627 int retval;
1619 1628
1620 trace("%p", udc); 1629 trace("%p", udc);
@@ -1635,11 +1644,15 @@ __acquires(udc->lock)
1635 if (retval) 1644 if (retval)
1636 goto done; 1645 goto done;
1637 1646
1638 retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc); 1647 retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc);
1648 if (retval)
1649 goto done;
1650
1651 retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc);
1639 if (!retval) { 1652 if (!retval) {
1640 mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC); 1653 udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
1641 if (mEp->status == NULL) { 1654 if (udc->status == NULL) {
1642 usb_ep_disable(&mEp->ep); 1655 usb_ep_disable(&udc->ep0out.ep);
1643 retval = -ENOMEM; 1656 retval = -ENOMEM;
1644 } 1657 }
1645 } 1658 }
@@ -1672,16 +1685,17 @@ static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
1672 1685
1673/** 1686/**
1674 * isr_get_status_response: get_status request response 1687 * isr_get_status_response: get_status request response
1675 * @ep: endpoint 1688 * @udc: udc struct
1676 * @setup: setup request packet 1689 * @setup: setup request packet
1677 * 1690 *
1678 * This function returns an error code 1691 * This function returns an error code
1679 */ 1692 */
1680static int isr_get_status_response(struct ci13xxx_ep *mEp, 1693static int isr_get_status_response(struct ci13xxx *udc,
1681 struct usb_ctrlrequest *setup) 1694 struct usb_ctrlrequest *setup)
1682__releases(mEp->lock) 1695__releases(mEp->lock)
1683__acquires(mEp->lock) 1696__acquires(mEp->lock)
1684{ 1697{
1698 struct ci13xxx_ep *mEp = &udc->ep0in;
1685 struct usb_request *req = NULL; 1699 struct usb_request *req = NULL;
1686 gfp_t gfp_flags = GFP_ATOMIC; 1700 gfp_t gfp_flags = GFP_ATOMIC;
1687 int dir, num, retval; 1701 int dir, num, retval;
@@ -1736,27 +1750,23 @@ __acquires(mEp->lock)
1736 1750
1737/** 1751/**
1738 * isr_setup_status_phase: queues the status phase of a setup transation 1752 * isr_setup_status_phase: queues the status phase of a setup transation
1739 * @mEp: endpoint 1753 * @udc: udc struct
1740 * 1754 *
1741 * This function returns an error code 1755 * This function returns an error code
1742 */ 1756 */
1743static int isr_setup_status_phase(struct ci13xxx_ep *mEp) 1757static int isr_setup_status_phase(struct ci13xxx *udc)
1744__releases(mEp->lock) 1758__releases(mEp->lock)
1745__acquires(mEp->lock) 1759__acquires(mEp->lock)
1746{ 1760{
1747 int retval; 1761 int retval;
1762 struct ci13xxx_ep *mEp;
1748 1763
1749 trace("%p", mEp); 1764 trace("%p", udc);
1750
1751 /* mEp is always valid & configured */
1752
1753 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
1754 mEp->dir = (mEp->dir == TX) ? RX : TX;
1755 1765
1756 mEp->status->no_interrupt = 1; 1766 mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
1757 1767
1758 spin_unlock(mEp->lock); 1768 spin_unlock(mEp->lock);
1759 retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC); 1769 retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
1760 spin_lock(mEp->lock); 1770 spin_lock(mEp->lock);
1761 1771
1762 return retval; 1772 return retval;
@@ -1778,11 +1788,11 @@ __acquires(mEp->lock)
1778 1788
1779 trace("%p", mEp); 1789 trace("%p", mEp);
1780 1790
1781 if (list_empty(&mEp->qh[mEp->dir].queue)) 1791 if (list_empty(&mEp->qh.queue))
1782 return -EINVAL; 1792 return -EINVAL;
1783 1793
1784 /* pop oldest request */ 1794 /* pop oldest request */
1785 mReq = list_entry(mEp->qh[mEp->dir].queue.next, 1795 mReq = list_entry(mEp->qh.queue.next,
1786 struct ci13xxx_req, queue); 1796 struct ci13xxx_req, queue);
1787 list_del_init(&mReq->queue); 1797 list_del_init(&mReq->queue);
1788 1798
@@ -1794,10 +1804,10 @@ __acquires(mEp->lock)
1794 1804
1795 dbg_done(_usb_addr(mEp), mReq->ptr->token, retval); 1805 dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
1796 1806
1797 if (!list_empty(&mEp->qh[mEp->dir].queue)) { 1807 if (!list_empty(&mEp->qh.queue)) {
1798 struct ci13xxx_req* mReqEnq; 1808 struct ci13xxx_req* mReqEnq;
1799 1809
1800 mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next, 1810 mReqEnq = list_entry(mEp->qh.queue.next,
1801 struct ci13xxx_req, queue); 1811 struct ci13xxx_req, queue);
1802 _hardware_enqueue(mEp, mReqEnq); 1812 _hardware_enqueue(mEp, mReqEnq);
1803 } 1813 }
@@ -1836,16 +1846,14 @@ __acquires(udc->lock)
1836 int type, num, err = -EINVAL; 1846 int type, num, err = -EINVAL;
1837 struct usb_ctrlrequest req; 1847 struct usb_ctrlrequest req;
1838 1848
1839
1840 if (mEp->desc == NULL) 1849 if (mEp->desc == NULL)
1841 continue; /* not configured */ 1850 continue; /* not configured */
1842 1851
1843 if ((mEp->dir == RX && hw_test_and_clear_complete(i)) || 1852 if (hw_test_and_clear_complete(i)) {
1844 (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
1845 err = isr_tr_complete_low(mEp); 1853 err = isr_tr_complete_low(mEp);
1846 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) { 1854 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
1847 if (err > 0) /* needs status phase */ 1855 if (err > 0) /* needs status phase */
1848 err = isr_setup_status_phase(mEp); 1856 err = isr_setup_status_phase(udc);
1849 if (err < 0) { 1857 if (err < 0) {
1850 dbg_event(_usb_addr(mEp), 1858 dbg_event(_usb_addr(mEp),
1851 "ERROR", err); 1859 "ERROR", err);
@@ -1866,15 +1874,22 @@ __acquires(udc->lock)
1866 continue; 1874 continue;
1867 } 1875 }
1868 1876
1877 /*
1878 * Flush data and handshake transactions of previous
1879 * setup packet.
1880 */
1881 _ep_nuke(&udc->ep0out);
1882 _ep_nuke(&udc->ep0in);
1883
1869 /* read_setup_packet */ 1884 /* read_setup_packet */
1870 do { 1885 do {
1871 hw_test_and_set_setup_guard(); 1886 hw_test_and_set_setup_guard();
1872 memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req)); 1887 memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
1873 } while (!hw_test_and_clear_setup_guard()); 1888 } while (!hw_test_and_clear_setup_guard());
1874 1889
1875 type = req.bRequestType; 1890 type = req.bRequestType;
1876 1891
1877 mEp->dir = (type & USB_DIR_IN) ? TX : RX; 1892 udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1878 1893
1879 dbg_setup(_usb_addr(mEp), &req); 1894 dbg_setup(_usb_addr(mEp), &req);
1880 1895
@@ -1895,7 +1910,7 @@ __acquires(udc->lock)
1895 if (err) 1910 if (err)
1896 break; 1911 break;
1897 } 1912 }
1898 err = isr_setup_status_phase(mEp); 1913 err = isr_setup_status_phase(udc);
1899 break; 1914 break;
1900 case USB_REQ_GET_STATUS: 1915 case USB_REQ_GET_STATUS:
1901 if (type != (USB_DIR_IN|USB_RECIP_DEVICE) && 1916 if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
@@ -1905,7 +1920,7 @@ __acquires(udc->lock)
1905 if (le16_to_cpu(req.wLength) != 2 || 1920 if (le16_to_cpu(req.wLength) != 2 ||
1906 le16_to_cpu(req.wValue) != 0) 1921 le16_to_cpu(req.wValue) != 0)
1907 break; 1922 break;
1908 err = isr_get_status_response(mEp, &req); 1923 err = isr_get_status_response(udc, &req);
1909 break; 1924 break;
1910 case USB_REQ_SET_ADDRESS: 1925 case USB_REQ_SET_ADDRESS:
1911 if (type != (USB_DIR_OUT|USB_RECIP_DEVICE)) 1926 if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
@@ -1916,7 +1931,7 @@ __acquires(udc->lock)
1916 err = hw_usb_set_address((u8)le16_to_cpu(req.wValue)); 1931 err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
1917 if (err) 1932 if (err)
1918 break; 1933 break;
1919 err = isr_setup_status_phase(mEp); 1934 err = isr_setup_status_phase(udc);
1920 break; 1935 break;
1921 case USB_REQ_SET_FEATURE: 1936 case USB_REQ_SET_FEATURE:
1922 if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) && 1937 if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
@@ -1932,12 +1947,12 @@ __acquires(udc->lock)
1932 spin_lock(udc->lock); 1947 spin_lock(udc->lock);
1933 if (err) 1948 if (err)
1934 break; 1949 break;
1935 err = isr_setup_status_phase(mEp); 1950 err = isr_setup_status_phase(udc);
1936 break; 1951 break;
1937 default: 1952 default:
1938delegate: 1953delegate:
1939 if (req.wLength == 0) /* no data phase */ 1954 if (req.wLength == 0) /* no data phase */
1940 mEp->dir = TX; 1955 udc->ep0_dir = TX;
1941 1956
1942 spin_unlock(udc->lock); 1957 spin_unlock(udc->lock);
1943 err = udc->driver->setup(&udc->gadget, &req); 1958 err = udc->driver->setup(&udc->gadget, &req);
@@ -1968,7 +1983,7 @@ static int ep_enable(struct usb_ep *ep,
1968 const struct usb_endpoint_descriptor *desc) 1983 const struct usb_endpoint_descriptor *desc)
1969{ 1984{
1970 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); 1985 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
1971 int direction, retval = 0; 1986 int retval = 0;
1972 unsigned long flags; 1987 unsigned long flags;
1973 1988
1974 trace("%p, %p", ep, desc); 1989 trace("%p, %p", ep, desc);
@@ -1982,7 +1997,7 @@ static int ep_enable(struct usb_ep *ep,
1982 1997
1983 mEp->desc = desc; 1998 mEp->desc = desc;
1984 1999
1985 if (!list_empty(&mEp->qh[mEp->dir].queue)) 2000 if (!list_empty(&mEp->qh.queue))
1986 warn("enabling a non-empty endpoint!"); 2001 warn("enabling a non-empty endpoint!");
1987 2002
1988 mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX; 2003 mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
@@ -1991,29 +2006,22 @@ static int ep_enable(struct usb_ep *ep,
1991 2006
1992 mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize); 2007 mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
1993 2008
1994 direction = mEp->dir; 2009 dbg_event(_usb_addr(mEp), "ENABLE", 0);
1995 do {
1996 dbg_event(_usb_addr(mEp), "ENABLE", 0);
1997 2010
1998 mEp->qh[mEp->dir].ptr->cap = 0; 2011 mEp->qh.ptr->cap = 0;
1999 2012
2000 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) 2013 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
2001 mEp->qh[mEp->dir].ptr->cap |= QH_IOS; 2014 mEp->qh.ptr->cap |= QH_IOS;
2002 else if (mEp->type == USB_ENDPOINT_XFER_ISOC) 2015 else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
2003 mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT; 2016 mEp->qh.ptr->cap &= ~QH_MULT;
2004 else 2017 else
2005 mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT; 2018 mEp->qh.ptr->cap &= ~QH_ZLT;
2006
2007 mEp->qh[mEp->dir].ptr->cap |=
2008 (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
2009 mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE; /* needed? */
2010
2011 retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
2012 2019
2013 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) 2020 mEp->qh.ptr->cap |=
2014 mEp->dir = (mEp->dir == TX) ? RX : TX; 2021 (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
2022 mEp->qh.ptr->td.next |= TD_TERMINATE; /* needed? */
2015 2023
2016 } while (mEp->dir != direction); 2024 retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
2017 2025
2018 spin_unlock_irqrestore(mEp->lock, flags); 2026 spin_unlock_irqrestore(mEp->lock, flags);
2019 return retval; 2027 return retval;
@@ -2146,7 +2154,7 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
2146 spin_lock_irqsave(mEp->lock, flags); 2154 spin_lock_irqsave(mEp->lock, flags);
2147 2155
2148 if (mEp->type == USB_ENDPOINT_XFER_CONTROL && 2156 if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
2149 !list_empty(&mEp->qh[mEp->dir].queue)) { 2157 !list_empty(&mEp->qh.queue)) {
2150 _ep_nuke(mEp); 2158 _ep_nuke(mEp);
2151 retval = -EOVERFLOW; 2159 retval = -EOVERFLOW;
2152 warn("endpoint ctrl %X nuked", _usb_addr(mEp)); 2160 warn("endpoint ctrl %X nuked", _usb_addr(mEp));
@@ -2170,9 +2178,9 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
2170 /* push request */ 2178 /* push request */
2171 mReq->req.status = -EINPROGRESS; 2179 mReq->req.status = -EINPROGRESS;
2172 mReq->req.actual = 0; 2180 mReq->req.actual = 0;
2173 list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue); 2181 list_add_tail(&mReq->queue, &mEp->qh.queue);
2174 2182
2175 if (list_is_singular(&mEp->qh[mEp->dir].queue)) 2183 if (list_is_singular(&mEp->qh.queue))
2176 retval = _hardware_enqueue(mEp, mReq); 2184 retval = _hardware_enqueue(mEp, mReq);
2177 2185
2178 if (retval == -EALREADY) { 2186 if (retval == -EALREADY) {
@@ -2199,7 +2207,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2199 trace("%p, %p", ep, req); 2207 trace("%p, %p", ep, req);
2200 2208
2201 if (ep == NULL || req == NULL || mEp->desc == NULL || 2209 if (ep == NULL || req == NULL || mEp->desc == NULL ||
2202 list_empty(&mReq->queue) || list_empty(&mEp->qh[mEp->dir].queue)) 2210 list_empty(&mReq->queue) || list_empty(&mEp->qh.queue))
2203 return -EINVAL; 2211 return -EINVAL;
2204 2212
2205 spin_lock_irqsave(mEp->lock, flags); 2213 spin_lock_irqsave(mEp->lock, flags);
@@ -2244,7 +2252,7 @@ static int ep_set_halt(struct usb_ep *ep, int value)
2244#ifndef STALL_IN 2252#ifndef STALL_IN
2245 /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */ 2253 /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
2246 if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX && 2254 if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
2247 !list_empty(&mEp->qh[mEp->dir].queue)) { 2255 !list_empty(&mEp->qh.queue)) {
2248 spin_unlock_irqrestore(mEp->lock, flags); 2256 spin_unlock_irqrestore(mEp->lock, flags);
2249 return -EAGAIN; 2257 return -EAGAIN;
2250 } 2258 }
@@ -2355,7 +2363,7 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
2355 if (is_active) { 2363 if (is_active) {
2356 pm_runtime_get_sync(&_gadget->dev); 2364 pm_runtime_get_sync(&_gadget->dev);
2357 hw_device_reset(udc); 2365 hw_device_reset(udc);
2358 hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma); 2366 hw_device_state(udc->ep0out.qh.dma);
2359 } else { 2367 } else {
2360 hw_device_state(0); 2368 hw_device_state(0);
2361 if (udc->udc_driver->notify_event) 2369 if (udc->udc_driver->notify_event)
@@ -2390,7 +2398,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2390 int (*bind)(struct usb_gadget *)) 2398 int (*bind)(struct usb_gadget *))
2391{ 2399{
2392 struct ci13xxx *udc = _udc; 2400 struct ci13xxx *udc = _udc;
2393 unsigned long i, k, flags; 2401 unsigned long flags;
2402 int i, j;
2394 int retval = -ENOMEM; 2403 int retval = -ENOMEM;
2395 2404
2396 trace("%p", driver); 2405 trace("%p", driver);
@@ -2427,45 +2436,46 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2427 2436
2428 info("hw_ep_max = %d", hw_ep_max); 2437 info("hw_ep_max = %d", hw_ep_max);
2429 2438
2430 udc->driver = driver;
2431 udc->gadget.dev.driver = NULL; 2439 udc->gadget.dev.driver = NULL;
2432 2440
2433 retval = 0; 2441 retval = 0;
2434 for (i = 0; i < hw_ep_max; i++) { 2442 for (i = 0; i < hw_ep_max/2; i++) {
2435 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i]; 2443 for (j = RX; j <= TX; j++) {
2444 int k = i + j * hw_ep_max/2;
2445 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
2436 2446
2437 scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i); 2447 scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
2448 (j == TX) ? "in" : "out");
2438 2449
2439 mEp->lock = udc->lock; 2450 mEp->lock = udc->lock;
2440 mEp->device = &udc->gadget.dev; 2451 mEp->device = &udc->gadget.dev;
2441 mEp->td_pool = udc->td_pool; 2452 mEp->td_pool = udc->td_pool;
2442 2453
2443 mEp->ep.name = mEp->name; 2454 mEp->ep.name = mEp->name;
2444 mEp->ep.ops = &usb_ep_ops; 2455 mEp->ep.ops = &usb_ep_ops;
2445 mEp->ep.maxpacket = CTRL_PAYLOAD_MAX; 2456 mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
2446 2457
2447 /* this allocation cannot be random */ 2458 INIT_LIST_HEAD(&mEp->qh.queue);
2448 for (k = RX; k <= TX; k++) {
2449 INIT_LIST_HEAD(&mEp->qh[k].queue);
2450 spin_unlock_irqrestore(udc->lock, flags); 2459 spin_unlock_irqrestore(udc->lock, flags);
2451 mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool, 2460 mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
2452 GFP_KERNEL, 2461 &mEp->qh.dma);
2453 &mEp->qh[k].dma);
2454 spin_lock_irqsave(udc->lock, flags); 2462 spin_lock_irqsave(udc->lock, flags);
2455 if (mEp->qh[k].ptr == NULL) 2463 if (mEp->qh.ptr == NULL)
2456 retval = -ENOMEM; 2464 retval = -ENOMEM;
2457 else 2465 else
2458 memset(mEp->qh[k].ptr, 0, 2466 memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
2459 sizeof(*mEp->qh[k].ptr)); 2467
2460 } 2468 /* skip ep0 out and in endpoints */
2461 if (i == 0) 2469 if (i == 0)
2462 udc->gadget.ep0 = &mEp->ep; 2470 continue;
2463 else 2471
2464 list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list); 2472 list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
2473 }
2465 } 2474 }
2466 if (retval) 2475 if (retval)
2467 goto done; 2476 goto done;
2468 2477
2478 udc->gadget.ep0 = &udc->ep0in.ep;
2469 /* bind gadget */ 2479 /* bind gadget */
2470 driver->driver.bus = NULL; 2480 driver->driver.bus = NULL;
2471 udc->gadget.dev.driver = &driver->driver; 2481 udc->gadget.dev.driver = &driver->driver;
@@ -2479,6 +2489,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2479 goto done; 2489 goto done;
2480 } 2490 }
2481 2491
2492 udc->driver = driver;
2482 pm_runtime_get_sync(&udc->gadget.dev); 2493 pm_runtime_get_sync(&udc->gadget.dev);
2483 if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) { 2494 if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
2484 if (udc->vbus_active) { 2495 if (udc->vbus_active) {
@@ -2490,14 +2501,12 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2490 } 2501 }
2491 } 2502 }
2492 2503
2493 retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma); 2504 retval = hw_device_state(udc->ep0out.qh.dma);
2494 if (retval) 2505 if (retval)
2495 pm_runtime_put_sync(&udc->gadget.dev); 2506 pm_runtime_put_sync(&udc->gadget.dev);
2496 2507
2497 done: 2508 done:
2498 spin_unlock_irqrestore(udc->lock, flags); 2509 spin_unlock_irqrestore(udc->lock, flags);
2499 if (retval)
2500 usb_gadget_unregister_driver(driver);
2501 return retval; 2510 return retval;
2502} 2511}
2503EXPORT_SYMBOL(usb_gadget_probe_driver); 2512EXPORT_SYMBOL(usb_gadget_probe_driver);
@@ -2510,7 +2519,7 @@ EXPORT_SYMBOL(usb_gadget_probe_driver);
2510int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 2519int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2511{ 2520{
2512 struct ci13xxx *udc = _udc; 2521 struct ci13xxx *udc = _udc;
2513 unsigned long i, k, flags; 2522 unsigned long i, flags;
2514 2523
2515 trace("%p", driver); 2524 trace("%p", driver);
2516 2525
@@ -2546,17 +2555,14 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2546 for (i = 0; i < hw_ep_max; i++) { 2555 for (i = 0; i < hw_ep_max; i++) {
2547 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i]; 2556 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
2548 2557
2549 if (i == 0) 2558 if (!list_empty(&mEp->ep.ep_list))
2550 udc->gadget.ep0 = NULL;
2551 else if (!list_empty(&mEp->ep.ep_list))
2552 list_del_init(&mEp->ep.ep_list); 2559 list_del_init(&mEp->ep.ep_list);
2553 2560
2554 for (k = RX; k <= TX; k++) 2561 if (mEp->qh.ptr != NULL)
2555 if (mEp->qh[k].ptr != NULL) 2562 dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
2556 dma_pool_free(udc->qh_pool,
2557 mEp->qh[k].ptr, mEp->qh[k].dma);
2558 } 2563 }
2559 2564
2565 udc->gadget.ep0 = NULL;
2560 udc->driver = NULL; 2566 udc->driver = NULL;
2561 2567
2562 spin_unlock_irqrestore(udc->lock, flags); 2568 spin_unlock_irqrestore(udc->lock, flags);
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index f61fed07f76b..a2492b65f98c 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -20,7 +20,7 @@
20 * DEFINE 20 * DEFINE
21 *****************************************************************************/ 21 *****************************************************************************/
22#define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */ 22#define CI13XXX_PAGE_SIZE 4096ul /* page size for TD's */
23#define ENDPT_MAX (16) 23#define ENDPT_MAX (32)
24#define CTRL_PAYLOAD_MAX (64) 24#define CTRL_PAYLOAD_MAX (64)
25#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */ 25#define RX (0) /* similar to USB_DIR_OUT but can be used as an index */
26#define TX (1) /* similar to USB_DIR_IN but can be used as an index */ 26#define TX (1) /* similar to USB_DIR_IN but can be used as an index */
@@ -88,8 +88,7 @@ struct ci13xxx_ep {
88 struct list_head queue; 88 struct list_head queue;
89 struct ci13xxx_qh *ptr; 89 struct ci13xxx_qh *ptr;
90 dma_addr_t dma; 90 dma_addr_t dma;
91 } qh[2]; 91 } qh;
92 struct usb_request *status;
93 int wedge; 92 int wedge;
94 93
95 /* global resources */ 94 /* global resources */
@@ -119,9 +118,13 @@ struct ci13xxx {
119 118
120 struct dma_pool *qh_pool; /* DMA pool for queue heads */ 119 struct dma_pool *qh_pool; /* DMA pool for queue heads */
121 struct dma_pool *td_pool; /* DMA pool for transfer descs */ 120 struct dma_pool *td_pool; /* DMA pool for transfer descs */
121 struct usb_request *status; /* ep0 status request */
122 122
123 struct usb_gadget gadget; /* USB slave device */ 123 struct usb_gadget gadget; /* USB slave device */
124 struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */ 124 struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
125 u32 ep0_dir; /* ep0 direction */
126#define ep0out ci13xxx_ep[0]
127#define ep0in ci13xxx_ep[16]
125 128
126 struct usb_gadget_driver *driver; /* 3rd party gadget driver */ 129 struct usb_gadget_driver *driver; /* 3rd party gadget driver */
127 struct ci13xxx_udc_driver *udc_driver; /* device controller driver */ 130 struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f6ff8456d52d..1ba4befe336b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -928,8 +928,9 @@ unknown:
928 */ 928 */
929 switch (ctrl->bRequestType & USB_RECIP_MASK) { 929 switch (ctrl->bRequestType & USB_RECIP_MASK) {
930 case USB_RECIP_INTERFACE: 930 case USB_RECIP_INTERFACE:
931 if (cdev->config) 931 if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
932 f = cdev->config->interface[intf]; 932 break;
933 f = cdev->config->interface[intf];
933 break; 934 break;
934 935
935 case USB_RECIP_ENDPOINT: 936 case USB_RECIP_ENDPOINT:
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index b5dbb2308f56..6d8e533949eb 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -293,6 +293,7 @@
293 293
294#include <linux/usb/ch9.h> 294#include <linux/usb/ch9.h>
295#include <linux/usb/gadget.h> 295#include <linux/usb/gadget.h>
296#include <linux/usb/composite.h>
296 297
297#include "gadget_chips.h" 298#include "gadget_chips.h"
298 299
@@ -2763,7 +2764,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2763 return ERR_PTR(-ENOMEM); 2764 return ERR_PTR(-ENOMEM);
2764 common->free_storage_on_release = 1; 2765 common->free_storage_on_release = 1;
2765 } else { 2766 } else {
2766 memset(common, 0, sizeof common); 2767 memset(common, 0, sizeof *common);
2767 common->free_storage_on_release = 0; 2768 common->free_storage_on_release = 0;
2768 } 2769 }
2769 2770
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 0c8dd81dddca..b120dbb64d0f 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -198,10 +198,10 @@
198#define PCH_UDC_BRLEN 0x0F /* Burst length */ 198#define PCH_UDC_BRLEN 0x0F /* Burst length */
199#define PCH_UDC_THLEN 0x1F /* Threshold length */ 199#define PCH_UDC_THLEN 0x1F /* Threshold length */
200/* Value of EP Buffer Size */ 200/* Value of EP Buffer Size */
201#define UDC_EP0IN_BUFF_SIZE 64 201#define UDC_EP0IN_BUFF_SIZE 16
202#define UDC_EPIN_BUFF_SIZE 512 202#define UDC_EPIN_BUFF_SIZE 256
203#define UDC_EP0OUT_BUFF_SIZE 64 203#define UDC_EP0OUT_BUFF_SIZE 16
204#define UDC_EPOUT_BUFF_SIZE 512 204#define UDC_EPOUT_BUFF_SIZE 256
205/* Value of EP maximum packet size */ 205/* Value of EP maximum packet size */
206#define UDC_EP0IN_MAX_PKT_SIZE 64 206#define UDC_EP0IN_MAX_PKT_SIZE 64
207#define UDC_EP0OUT_MAX_PKT_SIZE 64 207#define UDC_EP0OUT_MAX_PKT_SIZE 64
@@ -351,7 +351,7 @@ struct pch_udc_dev {
351 struct pci_pool *data_requests; 351 struct pci_pool *data_requests;
352 struct pci_pool *stp_requests; 352 struct pci_pool *stp_requests;
353 dma_addr_t dma_addr; 353 dma_addr_t dma_addr;
354 unsigned long ep0out_buf[64]; 354 void *ep0out_buf;
355 struct usb_ctrlrequest setup_data; 355 struct usb_ctrlrequest setup_data;
356 unsigned long phys_addr; 356 unsigned long phys_addr;
357 void __iomem *base_addr; 357 void __iomem *base_addr;
@@ -361,6 +361,8 @@ struct pch_udc_dev {
361 361
362#define PCH_UDC_PCI_BAR 1 362#define PCH_UDC_PCI_BAR 1
363#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 363#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
364#define PCI_VENDOR_ID_ROHM 0x10DB
365#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
364 366
365static const char ep0_string[] = "ep0in"; 367static const char ep0_string[] = "ep0in";
366static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ 368static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
@@ -1219,11 +1221,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1219 dev = ep->dev; 1221 dev = ep->dev;
1220 if (req->dma_mapped) { 1222 if (req->dma_mapped) {
1221 if (ep->in) 1223 if (ep->in)
1222 pci_unmap_single(dev->pdev, req->req.dma, 1224 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1223 req->req.length, PCI_DMA_TODEVICE); 1225 req->req.length, DMA_TO_DEVICE);
1224 else 1226 else
1225 pci_unmap_single(dev->pdev, req->req.dma, 1227 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1226 req->req.length, PCI_DMA_FROMDEVICE); 1228 req->req.length, DMA_FROM_DEVICE);
1227 req->dma_mapped = 0; 1229 req->dma_mapped = 0;
1228 req->req.dma = DMA_ADDR_INVALID; 1230 req->req.dma = DMA_ADDR_INVALID;
1229 } 1231 }
@@ -1414,7 +1416,6 @@ static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1414 1416
1415 pch_udc_clear_dma(ep->dev, DMA_DIR_RX); 1417 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1416 td_data = req->td_data; 1418 td_data = req->td_data;
1417 ep->td_data = req->td_data;
1418 /* Set the status bits for all descriptors */ 1419 /* Set the status bits for all descriptors */
1419 while (1) { 1420 while (1) {
1420 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) | 1421 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
@@ -1613,15 +1614,19 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1613 if (usbreq->length && 1614 if (usbreq->length &&
1614 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) { 1615 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1615 if (ep->in) 1616 if (ep->in)
1616 usbreq->dma = pci_map_single(dev->pdev, usbreq->buf, 1617 usbreq->dma = dma_map_single(&dev->pdev->dev,
1617 usbreq->length, PCI_DMA_TODEVICE); 1618 usbreq->buf,
1619 usbreq->length,
1620 DMA_TO_DEVICE);
1618 else 1621 else
1619 usbreq->dma = pci_map_single(dev->pdev, usbreq->buf, 1622 usbreq->dma = dma_map_single(&dev->pdev->dev,
1620 usbreq->length, PCI_DMA_FROMDEVICE); 1623 usbreq->buf,
1624 usbreq->length,
1625 DMA_FROM_DEVICE);
1621 req->dma_mapped = 1; 1626 req->dma_mapped = 1;
1622 } 1627 }
1623 if (usbreq->length > 0) { 1628 if (usbreq->length > 0) {
1624 retval = prepare_dma(ep, req, gfp); 1629 retval = prepare_dma(ep, req, GFP_ATOMIC);
1625 if (retval) 1630 if (retval)
1626 goto probe_end; 1631 goto probe_end;
1627 } 1632 }
@@ -1646,7 +1651,6 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1646 pch_udc_wait_ep_stall(ep); 1651 pch_udc_wait_ep_stall(ep);
1647 pch_udc_ep_clear_nak(ep); 1652 pch_udc_ep_clear_nak(ep);
1648 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num)); 1653 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1649 pch_udc_set_dma(dev, DMA_DIR_TX);
1650 } 1654 }
1651 } 1655 }
1652 /* Now add this request to the ep's pending requests */ 1656 /* Now add this request to the ep's pending requests */
@@ -1926,6 +1930,7 @@ static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
1926 PCH_UDC_BS_DMA_DONE) 1930 PCH_UDC_BS_DMA_DONE)
1927 return; 1931 return;
1928 pch_udc_clear_dma(ep->dev, DMA_DIR_RX); 1932 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1933 pch_udc_ep_set_ddptr(ep, 0);
1929 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) != 1934 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
1930 PCH_UDC_RTS_SUCC) { 1935 PCH_UDC_RTS_SUCC) {
1931 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) " 1936 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
@@ -1963,7 +1968,7 @@ static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
1963 u32 epsts; 1968 u32 epsts;
1964 struct pch_udc_ep *ep; 1969 struct pch_udc_ep *ep;
1965 1970
1966 ep = &dev->ep[2*ep_num]; 1971 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
1967 epsts = ep->epsts; 1972 epsts = ep->epsts;
1968 ep->epsts = 0; 1973 ep->epsts = 0;
1969 1974
@@ -2008,7 +2013,7 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2008 struct pch_udc_ep *ep; 2013 struct pch_udc_ep *ep;
2009 struct pch_udc_request *req = NULL; 2014 struct pch_udc_request *req = NULL;
2010 2015
2011 ep = &dev->ep[2*ep_num + 1]; 2016 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2012 epsts = ep->epsts; 2017 epsts = ep->epsts;
2013 ep->epsts = 0; 2018 ep->epsts = 0;
2014 2019
@@ -2025,10 +2030,11 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2025 } 2030 }
2026 if (epsts & UDC_EPSTS_HE) 2031 if (epsts & UDC_EPSTS_HE)
2027 return; 2032 return;
2028 if (epsts & UDC_EPSTS_RSS) 2033 if (epsts & UDC_EPSTS_RSS) {
2029 pch_udc_ep_set_stall(ep); 2034 pch_udc_ep_set_stall(ep);
2030 pch_udc_enable_ep_interrupts(ep->dev, 2035 pch_udc_enable_ep_interrupts(ep->dev,
2031 PCH_UDC_EPINT(ep->in, ep->num)); 2036 PCH_UDC_EPINT(ep->in, ep->num));
2037 }
2032 if (epsts & UDC_EPSTS_RCS) { 2038 if (epsts & UDC_EPSTS_RCS) {
2033 if (!dev->prot_stall) { 2039 if (!dev->prot_stall) {
2034 pch_udc_ep_clear_stall(ep); 2040 pch_udc_ep_clear_stall(ep);
@@ -2060,8 +2066,10 @@ static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2060{ 2066{
2061 u32 epsts; 2067 u32 epsts;
2062 struct pch_udc_ep *ep; 2068 struct pch_udc_ep *ep;
2069 struct pch_udc_ep *ep_out;
2063 2070
2064 ep = &dev->ep[UDC_EP0IN_IDX]; 2071 ep = &dev->ep[UDC_EP0IN_IDX];
2072 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2065 epsts = ep->epsts; 2073 epsts = ep->epsts;
2066 ep->epsts = 0; 2074 ep->epsts = 0;
2067 2075
@@ -2073,8 +2081,16 @@ static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2073 return; 2081 return;
2074 if (epsts & UDC_EPSTS_HE) 2082 if (epsts & UDC_EPSTS_HE)
2075 return; 2083 return;
2076 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) 2084 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2077 pch_udc_complete_transfer(ep); 2085 pch_udc_complete_transfer(ep);
2086 pch_udc_clear_dma(dev, DMA_DIR_RX);
2087 ep_out->td_data->status = (ep_out->td_data->status &
2088 ~PCH_UDC_BUFF_STS) |
2089 PCH_UDC_BS_HST_RDY;
2090 pch_udc_ep_clear_nak(ep_out);
2091 pch_udc_set_dma(dev, DMA_DIR_RX);
2092 pch_udc_ep_set_rrdy(ep_out);
2093 }
2078 /* On IN interrupt, provide data if we have any */ 2094 /* On IN interrupt, provide data if we have any */
2079 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) && 2095 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2080 !(epsts & UDC_EPSTS_TXEMPTY)) 2096 !(epsts & UDC_EPSTS_TXEMPTY))
@@ -2102,11 +2118,9 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2102 dev->stall = 0; 2118 dev->stall = 0;
2103 dev->ep[UDC_EP0IN_IDX].halted = 0; 2119 dev->ep[UDC_EP0IN_IDX].halted = 0;
2104 dev->ep[UDC_EP0OUT_IDX].halted = 0; 2120 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2105 /* In data not ready */
2106 pch_udc_ep_set_nak(&(dev->ep[UDC_EP0IN_IDX]));
2107 dev->setup_data = ep->td_stp->request; 2121 dev->setup_data = ep->td_stp->request;
2108 pch_udc_init_setup_buff(ep->td_stp); 2122 pch_udc_init_setup_buff(ep->td_stp);
2109 pch_udc_clear_dma(dev, DMA_DIR_TX); 2123 pch_udc_clear_dma(dev, DMA_DIR_RX);
2110 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]), 2124 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2111 dev->ep[UDC_EP0IN_IDX].in); 2125 dev->ep[UDC_EP0IN_IDX].in);
2112 if ((dev->setup_data.bRequestType & USB_DIR_IN)) 2126 if ((dev->setup_data.bRequestType & USB_DIR_IN))
@@ -2122,14 +2136,23 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2122 setup_supported = dev->driver->setup(&dev->gadget, 2136 setup_supported = dev->driver->setup(&dev->gadget,
2123 &dev->setup_data); 2137 &dev->setup_data);
2124 spin_lock(&dev->lock); 2138 spin_lock(&dev->lock);
2139
2140 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2141 ep->td_data->status = (ep->td_data->status &
2142 ~PCH_UDC_BUFF_STS) |
2143 PCH_UDC_BS_HST_RDY;
2144 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2145 }
2125 /* ep0 in returns data on IN phase */ 2146 /* ep0 in returns data on IN phase */
2126 if (setup_supported >= 0 && setup_supported < 2147 if (setup_supported >= 0 && setup_supported <
2127 UDC_EP0IN_MAX_PKT_SIZE) { 2148 UDC_EP0IN_MAX_PKT_SIZE) {
2128 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX])); 2149 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2129 /* Gadget would have queued a request when 2150 /* Gadget would have queued a request when
2130 * we called the setup */ 2151 * we called the setup */
2131 pch_udc_set_dma(dev, DMA_DIR_RX); 2152 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2132 pch_udc_ep_clear_nak(ep); 2153 pch_udc_set_dma(dev, DMA_DIR_RX);
2154 pch_udc_ep_clear_nak(ep);
2155 }
2133 } else if (setup_supported < 0) { 2156 } else if (setup_supported < 0) {
2134 /* if unsupported request, then stall */ 2157 /* if unsupported request, then stall */
2135 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX])); 2158 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
@@ -2142,22 +2165,13 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2142 } 2165 }
2143 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) == 2166 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2144 UDC_EPSTS_OUT_DATA) && !dev->stall) { 2167 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2145 if (list_empty(&ep->queue)) { 2168 pch_udc_clear_dma(dev, DMA_DIR_RX);
2146 dev_err(&dev->pdev->dev, "%s: No request\n", __func__); 2169 pch_udc_ep_set_ddptr(ep, 0);
2147 ep->td_data->status = (ep->td_data->status & 2170 if (!list_empty(&ep->queue)) {
2148 ~PCH_UDC_BUFF_STS) |
2149 PCH_UDC_BS_HST_RDY;
2150 pch_udc_set_dma(dev, DMA_DIR_RX);
2151 } else {
2152 /* control write */
2153 /* next function will pickuo an clear the status */
2154 ep->epsts = stat; 2171 ep->epsts = stat;
2155 2172 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2156 pch_udc_svc_data_out(dev, 0);
2157 /* re-program desc. pointer for possible ZLPs */
2158 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2159 pch_udc_set_dma(dev, DMA_DIR_RX);
2160 } 2173 }
2174 pch_udc_set_dma(dev, DMA_DIR_RX);
2161 } 2175 }
2162 pch_udc_ep_set_rrdy(ep); 2176 pch_udc_ep_set_rrdy(ep);
2163} 2177}
@@ -2174,7 +2188,7 @@ static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2174 struct pch_udc_ep *ep; 2188 struct pch_udc_ep *ep;
2175 struct pch_udc_request *req; 2189 struct pch_udc_request *req;
2176 2190
2177 ep = &dev->ep[2*ep_num]; 2191 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2178 if (!list_empty(&ep->queue)) { 2192 if (!list_empty(&ep->queue)) {
2179 req = list_entry(ep->queue.next, struct pch_udc_request, queue); 2193 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2180 pch_udc_enable_ep_interrupts(ep->dev, 2194 pch_udc_enable_ep_interrupts(ep->dev,
@@ -2196,13 +2210,13 @@ static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2196 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) { 2210 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2197 /* IN */ 2211 /* IN */
2198 if (ep_intr & (0x1 << i)) { 2212 if (ep_intr & (0x1 << i)) {
2199 ep = &dev->ep[2*i]; 2213 ep = &dev->ep[UDC_EPIN_IDX(i)];
2200 ep->epsts = pch_udc_read_ep_status(ep); 2214 ep->epsts = pch_udc_read_ep_status(ep);
2201 pch_udc_clear_ep_status(ep, ep->epsts); 2215 pch_udc_clear_ep_status(ep, ep->epsts);
2202 } 2216 }
2203 /* OUT */ 2217 /* OUT */
2204 if (ep_intr & (0x10000 << i)) { 2218 if (ep_intr & (0x10000 << i)) {
2205 ep = &dev->ep[2*i+1]; 2219 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2206 ep->epsts = pch_udc_read_ep_status(ep); 2220 ep->epsts = pch_udc_read_ep_status(ep);
2207 pch_udc_clear_ep_status(ep, ep->epsts); 2221 pch_udc_clear_ep_status(ep, ep->epsts);
2208 } 2222 }
@@ -2563,9 +2577,6 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2563 dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE; 2577 dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
2564 dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE; 2578 dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
2565 2579
2566 dev->dma_addr = pci_map_single(dev->pdev, dev->ep0out_buf, 256,
2567 PCI_DMA_FROMDEVICE);
2568
2569 /* remove ep0 in and out from the list. They have own pointer */ 2580 /* remove ep0 in and out from the list. They have own pointer */
2570 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list); 2581 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2571 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list); 2582 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
@@ -2637,6 +2648,13 @@ static int init_dma_pools(struct pch_udc_dev *dev)
2637 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0; 2648 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2638 dev->ep[UDC_EP0IN_IDX].td_data = NULL; 2649 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2639 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0; 2650 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2651
2652 dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2653 if (!dev->ep0out_buf)
2654 return -ENOMEM;
2655 dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2656 UDC_EP0OUT_BUFF_SIZE * 4,
2657 DMA_FROM_DEVICE);
2640 return 0; 2658 return 0;
2641} 2659}
2642 2660
@@ -2700,7 +2718,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2700 2718
2701 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK); 2719 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2702 2720
2703 /* Assues that there are no pending requets with this driver */ 2721 /* Assures that there are no pending requests with this driver */
2722 driver->disconnect(&dev->gadget);
2704 driver->unbind(&dev->gadget); 2723 driver->unbind(&dev->gadget);
2705 dev->gadget.dev.driver = NULL; 2724 dev->gadget.dev.driver = NULL;
2706 dev->driver = NULL; 2725 dev->driver = NULL;
@@ -2750,6 +2769,11 @@ static void pch_udc_remove(struct pci_dev *pdev)
2750 pci_pool_destroy(dev->stp_requests); 2769 pci_pool_destroy(dev->stp_requests);
2751 } 2770 }
2752 2771
2772 if (dev->dma_addr)
2773 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
2774 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
2775 kfree(dev->ep0out_buf);
2776
2753 pch_udc_exit(dev); 2777 pch_udc_exit(dev);
2754 2778
2755 if (dev->irq_registered) 2779 if (dev->irq_registered)
@@ -2792,11 +2816,7 @@ static int pch_udc_resume(struct pci_dev *pdev)
2792 int ret; 2816 int ret;
2793 2817
2794 pci_set_power_state(pdev, PCI_D0); 2818 pci_set_power_state(pdev, PCI_D0);
2795 ret = pci_restore_state(pdev); 2819 pci_restore_state(pdev);
2796 if (ret) {
2797 dev_err(&pdev->dev, "%s: pci_restore_state failed\n", __func__);
2798 return ret;
2799 }
2800 ret = pci_enable_device(pdev); 2820 ret = pci_enable_device(pdev);
2801 if (ret) { 2821 if (ret) {
2802 dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__); 2822 dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
@@ -2914,6 +2934,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
2914 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 2934 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2915 .class_mask = 0xffffffff, 2935 .class_mask = 0xffffffff,
2916 }, 2936 },
2937 {
2938 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
2939 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2940 .class_mask = 0xffffffff,
2941 },
2917 { 0 }, 2942 { 0 },
2918}; 2943};
2919 2944
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2fc8636316c5..12ff6cffedc9 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -131,31 +131,31 @@ static struct printer_dev usb_printer_gadget;
131 * parameters are in UTF-8 (superset of ASCII's 7 bit characters). 131 * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
132 */ 132 */
133 133
134static ushort __initdata idVendor; 134static ushort idVendor;
135module_param(idVendor, ushort, S_IRUGO); 135module_param(idVendor, ushort, S_IRUGO);
136MODULE_PARM_DESC(idVendor, "USB Vendor ID"); 136MODULE_PARM_DESC(idVendor, "USB Vendor ID");
137 137
138static ushort __initdata idProduct; 138static ushort idProduct;
139module_param(idProduct, ushort, S_IRUGO); 139module_param(idProduct, ushort, S_IRUGO);
140MODULE_PARM_DESC(idProduct, "USB Product ID"); 140MODULE_PARM_DESC(idProduct, "USB Product ID");
141 141
142static ushort __initdata bcdDevice; 142static ushort bcdDevice;
143module_param(bcdDevice, ushort, S_IRUGO); 143module_param(bcdDevice, ushort, S_IRUGO);
144MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); 144MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
145 145
146static char *__initdata iManufacturer; 146static char *iManufacturer;
147module_param(iManufacturer, charp, S_IRUGO); 147module_param(iManufacturer, charp, S_IRUGO);
148MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); 148MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
149 149
150static char *__initdata iProduct; 150static char *iProduct;
151module_param(iProduct, charp, S_IRUGO); 151module_param(iProduct, charp, S_IRUGO);
152MODULE_PARM_DESC(iProduct, "USB Product string"); 152MODULE_PARM_DESC(iProduct, "USB Product string");
153 153
154static char *__initdata iSerialNum; 154static char *iSerialNum;
155module_param(iSerialNum, charp, S_IRUGO); 155module_param(iSerialNum, charp, S_IRUGO);
156MODULE_PARM_DESC(iSerialNum, "1"); 156MODULE_PARM_DESC(iSerialNum, "1");
157 157
158static char *__initdata iPNPstring; 158static char *iPNPstring;
159module_param(iPNPstring, charp, S_IRUGO); 159module_param(iPNPstring, charp, S_IRUGO);
160MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;"); 160MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
161 161
@@ -1596,13 +1596,12 @@ cleanup(void)
1596 int status; 1596 int status;
1597 1597
1598 mutex_lock(&usb_printer_gadget.lock_printer_io); 1598 mutex_lock(&usb_printer_gadget.lock_printer_io);
1599 class_destroy(usb_gadget_class);
1600 unregister_chrdev_region(g_printer_devno, 2);
1601
1602 status = usb_gadget_unregister_driver(&printer_driver); 1599 status = usb_gadget_unregister_driver(&printer_driver);
1603 if (status) 1600 if (status)
1604 ERROR(dev, "usb_gadget_unregister_driver %x\n", status); 1601 ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
1605 1602
1603 unregister_chrdev_region(g_printer_devno, 2);
1604 class_destroy(usb_gadget_class);
1606 mutex_unlock(&usb_printer_gadget.lock_printer_io); 1605 mutex_unlock(&usb_printer_gadget.lock_printer_io);
1607} 1606}
1608module_exit(cleanup); 1607module_exit(cleanup);
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 20d43da319ae..015118535f77 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -258,7 +258,7 @@ static int pipe_buffer_setting(struct r8a66597 *r8a66597,
258 break; 258 break;
259 case R8A66597_BULK: 259 case R8A66597_BULK:
260 /* isochronous pipes may be used as bulk pipes */ 260 /* isochronous pipes may be used as bulk pipes */
261 if (info->pipe > R8A66597_BASE_PIPENUM_BULK) 261 if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
262 bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK; 262 bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
263 else 263 else
264 bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC; 264 bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 24046c0f5878..0e6afa260ed8 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -151,6 +151,8 @@ config USB_EHCI_MSM
151 Qualcomm chipsets. Root Hub has inbuilt TT. 151 Qualcomm chipsets. Root Hub has inbuilt TT.
152 This driver depends on OTG driver for PHY initialization, 152 This driver depends on OTG driver for PHY initialization,
153 clock management, powering up VBUS, and power management. 153 clock management, powering up VBUS, and power management.
154 This driver is not supported on boards like trout which
155 has an external PHY.
154 156
155config USB_EHCI_HCD_PPC_OF 157config USB_EHCI_HCD_PPC_OF
156 bool "EHCI support for PPC USB controller on OF platform bus" 158 bool "EHCI support for PPC USB controller on OF platform bus"
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 2baf8a849086..a869e3c103d3 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
227 * mark HW unaccessible. The PM and USB cores make sure that 227 * mark HW unaccessible. The PM and USB cores make sure that
228 * the root hub is either suspended or stopped. 228 * the root hub is either suspended or stopped.
229 */ 229 */
230 spin_lock_irqsave(&ehci->lock, flags);
231 ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev)); 230 ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
231 spin_lock_irqsave(&ehci->lock, flags);
232 ehci_writel(ehci, 0, &ehci->regs->intr_enable); 232 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
233 (void)ehci_readl(ehci, &ehci->regs->intr_enable); 233 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
234 234
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 86e42892016d..5c761df7fa83 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -52,7 +52,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
52 struct resource *res; 52 struct resource *res;
53 int irq; 53 int irq;
54 int retval; 54 int retval;
55 unsigned int temp;
56 55
57 pr_debug("initializing FSL-SOC USB Controller\n"); 56 pr_debug("initializing FSL-SOC USB Controller\n");
58 57
@@ -126,18 +125,6 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
126 goto err3; 125 goto err3;
127 } 126 }
128 127
129 /*
130 * Check if it is MPC5121 SoC, otherwise set pdata->have_sysif_regs
131 * flag for 83xx or 8536 system interface registers.
132 */
133 if (pdata->big_endian_mmio)
134 temp = in_be32(hcd->regs + FSL_SOC_USB_ID);
135 else
136 temp = in_le32(hcd->regs + FSL_SOC_USB_ID);
137
138 if ((temp & ID_MSK) != (~((temp & NID_MSK) >> 8) & ID_MSK))
139 pdata->have_sysif_regs = 1;
140
141 /* Enable USB controller, 83xx or 8536 */ 128 /* Enable USB controller, 83xx or 8536 */
142 if (pdata->have_sysif_regs) 129 if (pdata->have_sysif_regs)
143 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4); 130 setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 2c8353795226..3fabed33d940 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -19,9 +19,6 @@
19#define _EHCI_FSL_H 19#define _EHCI_FSL_H
20 20
21/* offsets for the non-ehci registers in the FSL SOC USB controller */ 21/* offsets for the non-ehci registers in the FSL SOC USB controller */
22#define FSL_SOC_USB_ID 0x0
23#define ID_MSK 0x3f
24#define NID_MSK 0x3f00
25#define FSL_SOC_USB_ULPIVP 0x170 22#define FSL_SOC_USB_ULPIVP 0x170
26#define FSL_SOC_USB_PORTSC1 0x184 23#define FSL_SOC_USB_PORTSC1 0x184
27#define PORT_PTS_MSK (3<<30) 24#define PORT_PTS_MSK (3<<30)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6fee3cd58efe..74dcf49bd015 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -572,6 +572,8 @@ static int ehci_init(struct usb_hcd *hcd)
572 ehci->iaa_watchdog.function = ehci_iaa_watchdog; 572 ehci->iaa_watchdog.function = ehci_iaa_watchdog;
573 ehci->iaa_watchdog.data = (unsigned long) ehci; 573 ehci->iaa_watchdog.data = (unsigned long) ehci;
574 574
575 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
576
575 /* 577 /*
576 * hw default: 1K periodic list heads, one per frame. 578 * hw default: 1K periodic list heads, one per frame.
577 * periodic_size can shrink by USBCMD update if hcc_params allows. 579 * periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -579,11 +581,20 @@ static int ehci_init(struct usb_hcd *hcd)
579 ehci->periodic_size = DEFAULT_I_TDPS; 581 ehci->periodic_size = DEFAULT_I_TDPS;
580 INIT_LIST_HEAD(&ehci->cached_itd_list); 582 INIT_LIST_HEAD(&ehci->cached_itd_list);
581 INIT_LIST_HEAD(&ehci->cached_sitd_list); 583 INIT_LIST_HEAD(&ehci->cached_sitd_list);
584
585 if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
586 /* periodic schedule size can be smaller than default */
587 switch (EHCI_TUNE_FLS) {
588 case 0: ehci->periodic_size = 1024; break;
589 case 1: ehci->periodic_size = 512; break;
590 case 2: ehci->periodic_size = 256; break;
591 default: BUG();
592 }
593 }
582 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) 594 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
583 return retval; 595 return retval;
584 596
585 /* controllers may cache some of the periodic schedule ... */ 597 /* controllers may cache some of the periodic schedule ... */
586 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
587 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache 598 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
588 ehci->i_thresh = 2 + 8; 599 ehci->i_thresh = 2 + 8;
589 else // N microframes cached 600 else // N microframes cached
@@ -637,12 +648,6 @@ static int ehci_init(struct usb_hcd *hcd)
637 /* periodic schedule size can be smaller than default */ 648 /* periodic schedule size can be smaller than default */
638 temp &= ~(3 << 2); 649 temp &= ~(3 << 2);
639 temp |= (EHCI_TUNE_FLS << 2); 650 temp |= (EHCI_TUNE_FLS << 2);
640 switch (EHCI_TUNE_FLS) {
641 case 0: ehci->periodic_size = 1024; break;
642 case 1: ehci->periodic_size = 512; break;
643 case 2: ehci->periodic_size = 256; break;
644 default: BUG();
645 }
646 } 651 }
647 if (HCC_LPM(hcc_params)) { 652 if (HCC_LPM(hcc_params)) {
648 /* support link power management EHCI 1.1 addendum */ 653 /* support link power management EHCI 1.1 addendum */
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 796ea0c8900f..8a515f0d5988 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
111{ 111{
112 int port; 112 int port;
113 u32 temp; 113 u32 temp;
114 unsigned long flags;
114 115
115 /* If remote wakeup is enabled for the root hub but disabled 116 /* If remote wakeup is enabled for the root hub but disabled
116 * for the controller, we must adjust all the port wakeup flags 117 * for the controller, we must adjust all the port wakeup flags
@@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
120 if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup) 121 if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
121 return; 122 return;
122 123
124 spin_lock_irqsave(&ehci->lock, flags);
125
123 /* clear phy low-power mode before changing wakeup flags */ 126 /* clear phy low-power mode before changing wakeup flags */
124 if (ehci->has_hostpc) { 127 if (ehci->has_hostpc) {
125 port = HCS_N_PORTS(ehci->hcs_params); 128 port = HCS_N_PORTS(ehci->hcs_params);
@@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
131 temp = ehci_readl(ehci, hostpc_reg); 134 temp = ehci_readl(ehci, hostpc_reg);
132 ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); 135 ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
133 } 136 }
137 spin_unlock_irqrestore(&ehci->lock, flags);
134 msleep(5); 138 msleep(5);
139 spin_lock_irqsave(&ehci->lock, flags);
135 } 140 }
136 141
137 port = HCS_N_PORTS(ehci->hcs_params); 142 port = HCS_N_PORTS(ehci->hcs_params);
@@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
170 /* Does the root hub have a port wakeup pending? */ 175 /* Does the root hub have a port wakeup pending? */
171 if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) 176 if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
172 usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); 177 usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
178
179 spin_unlock_irqrestore(&ehci->lock, flags);
173} 180}
174 181
175static int ehci_bus_suspend (struct usb_hcd *hcd) 182static int ehci_bus_suspend (struct usb_hcd *hcd)
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index fa59b26fc5bc..c8e360d7d975 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -21,10 +21,13 @@
21#include <linux/clk.h> 21#include <linux/clk.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/usb/otg.h> 23#include <linux/usb/otg.h>
24#include <linux/usb/ulpi.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25 26
26#include <mach/mxc_ehci.h> 27#include <mach/mxc_ehci.h>
27 28
29#include <asm/mach-types.h>
30
28#define ULPI_VIEWPORT_OFFSET 0x170 31#define ULPI_VIEWPORT_OFFSET 0x170
29 32
30struct ehci_mxc_priv { 33struct ehci_mxc_priv {
@@ -114,6 +117,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
114 struct usb_hcd *hcd; 117 struct usb_hcd *hcd;
115 struct resource *res; 118 struct resource *res;
116 int irq, ret; 119 int irq, ret;
120 unsigned int flags;
117 struct ehci_mxc_priv *priv; 121 struct ehci_mxc_priv *priv;
118 struct device *dev = &pdev->dev; 122 struct device *dev = &pdev->dev;
119 struct ehci_hcd *ehci; 123 struct ehci_hcd *ehci;
@@ -177,8 +181,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
177 clk_enable(priv->ahbclk); 181 clk_enable(priv->ahbclk);
178 } 182 }
179 183
180 /* "dr" device has its own clock */ 184 /* "dr" device has its own clock on i.MX51 */
181 if (pdev->id == 0) { 185 if (cpu_is_mx51() && (pdev->id == 0)) {
182 priv->phy1clk = clk_get(dev, "usb_phy1"); 186 priv->phy1clk = clk_get(dev, "usb_phy1");
183 if (IS_ERR(priv->phy1clk)) { 187 if (IS_ERR(priv->phy1clk)) {
184 ret = PTR_ERR(priv->phy1clk); 188 ret = PTR_ERR(priv->phy1clk);
@@ -240,6 +244,23 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
240 if (ret) 244 if (ret)
241 goto err_add; 245 goto err_add;
242 246
247 if (pdata->otg) {
248 /*
249 * efikamx and efikasb have some hardware bug which is
250 * preventing usb to work unless CHRGVBUS is set.
251 * It's in violation of USB specs
252 */
253 if (machine_is_mx51_efikamx() || machine_is_mx51_efikasb()) {
254 flags = otg_io_read(pdata->otg, ULPI_OTG_CTRL);
255 flags |= ULPI_OTG_CTRL_CHRGVBUS;
256 ret = otg_io_write(pdata->otg, flags, ULPI_OTG_CTRL);
257 if (ret) {
258 dev_err(dev, "unable to set CHRVBUS\n");
259 goto err_add;
260 }
261 }
262 }
263
243 return 0; 264 return 0;
244 265
245err_add: 266err_add:
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 680f2ef4e59f..f784ceb862a3 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
796 hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev, 796 hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
797 dev_name(&pdev->dev)); 797 dev_name(&pdev->dev));
798 if (!hcd) { 798 if (!hcd) {
799 dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret); 799 dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
800 ret = -ENOMEM; 800 ret = -ENOMEM;
801 goto err_create_hcd; 801 goto err_create_hcd;
802 } 802 }
@@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
864 864
865 ret = omap_start_ehc(omap, hcd); 865 ret = omap_start_ehc(omap, hcd);
866 if (ret) { 866 if (ret) {
867 dev_dbg(&pdev->dev, "failed to start ehci\n"); 867 dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
868 goto err_start; 868 goto err_start;
869 } 869 }
870 870
@@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
879 879
880 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); 880 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
881 if (ret) { 881 if (ret) {
882 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); 882 dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
883 goto err_add_hcd; 883 goto err_add_hcd;
884 } 884 }
885 885
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 76179c39c0e3..07bb982e59f6 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -44,28 +44,35 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
44 return 0; 44 return 0;
45} 45}
46 46
47static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci) 47static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
48{ 48{
49 struct pci_dev *amd_smbus_dev; 49 struct pci_dev *amd_smbus_dev;
50 u8 rev = 0; 50 u8 rev = 0;
51 51
52 amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); 52 amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
53 if (!amd_smbus_dev) 53 if (amd_smbus_dev) {
54 return 0; 54 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
55 55 if (rev < 0x40) {
56 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); 56 pci_dev_put(amd_smbus_dev);
57 if (rev < 0x40) { 57 amd_smbus_dev = NULL;
58 pci_dev_put(amd_smbus_dev); 58 return 0;
59 amd_smbus_dev = NULL; 59 }
60 return 0; 60 } else {
61 amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
62 if (!amd_smbus_dev)
63 return 0;
64 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
65 if (rev < 0x11 || rev > 0x18) {
66 pci_dev_put(amd_smbus_dev);
67 amd_smbus_dev = NULL;
68 return 0;
69 }
61 } 70 }
62 71
63 if (!amd_nb_dev) 72 if (!amd_nb_dev)
64 amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL); 73 amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
65 if (!amd_nb_dev)
66 ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
67 74
68 ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n"); 75 ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
69 76
70 pci_dev_put(amd_smbus_dev); 77 pci_dev_put(amd_smbus_dev);
71 amd_smbus_dev = NULL; 78 amd_smbus_dev = NULL;
@@ -131,7 +138,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
131 /* cache this readonly data; minimize chip reads */ 138 /* cache this readonly data; minimize chip reads */
132 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); 139 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
133 140
134 if (ehci_quirk_amd_SB800(ehci)) 141 if (ehci_quirk_amd_hudson(ehci))
135 ehci->amd_l1_fix = 1; 142 ehci->amd_l1_fix = 1;
136 143
137 retval = ehci_halt(ehci); 144 retval = ehci_halt(ehci);
@@ -360,8 +367,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
360 * mark HW unaccessible. The PM and USB cores make sure that 367 * mark HW unaccessible. The PM and USB cores make sure that
361 * the root hub is either suspended or stopped. 368 * the root hub is either suspended or stopped.
362 */ 369 */
363 spin_lock_irqsave (&ehci->lock, flags);
364 ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); 370 ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
371 spin_lock_irqsave (&ehci->lock, flags);
365 ehci_writel(ehci, 0, &ehci->regs->intr_enable); 372 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
366 (void)ehci_readl(ehci, &ehci->regs->intr_enable); 373 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
367 374
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 574b99ea0700..79a66d622f9c 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -262,19 +262,24 @@ static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
262 } 262 }
263} 263}
264 264
265struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = { 265static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
266 .big_endian_desc = 1, 266 .big_endian_desc = 1,
267 .big_endian_mmio = 1, 267 .big_endian_mmio = 1,
268 .es = 1, 268 .es = 1,
269 .have_sysif_regs = 0,
269 .le_setup_buf = 1, 270 .le_setup_buf = 1,
270 .init = fsl_usb2_mpc5121_init, 271 .init = fsl_usb2_mpc5121_init,
271 .exit = fsl_usb2_mpc5121_exit, 272 .exit = fsl_usb2_mpc5121_exit,
272}; 273};
273#endif /* CONFIG_PPC_MPC512x */ 274#endif /* CONFIG_PPC_MPC512x */
274 275
276static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
277 .have_sysif_regs = 1,
278};
279
275static const struct of_device_id fsl_usb2_mph_dr_of_match[] = { 280static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
276 { .compatible = "fsl-usb2-mph", }, 281 { .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
277 { .compatible = "fsl-usb2-dr", }, 282 { .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
278#ifdef CONFIG_PPC_MPC512x 283#ifdef CONFIG_PPC_MPC512x
279 { .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, }, 284 { .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
280#endif 285#endif
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 990f06b89eaa..2e9602a10e9b 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -861,6 +861,7 @@ static int sl811h_urb_enqueue(
861 DBG("dev %d ep%d maxpacket %d\n", 861 DBG("dev %d ep%d maxpacket %d\n",
862 udev->devnum, epnum, ep->maxpacket); 862 udev->devnum, epnum, ep->maxpacket);
863 retval = -EINVAL; 863 retval = -EINVAL;
864 kfree(ep);
864 goto fail; 865 goto fail;
865 } 866 }
866 867
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index df558f6f84e3..3e8211c1ce5a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -308,11 +308,8 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
308/* Ring the host controller doorbell after placing a command on the ring */ 308/* Ring the host controller doorbell after placing a command on the ring */
309void xhci_ring_cmd_db(struct xhci_hcd *xhci) 309void xhci_ring_cmd_db(struct xhci_hcd *xhci)
310{ 310{
311 u32 temp;
312
313 xhci_dbg(xhci, "// Ding dong!\n"); 311 xhci_dbg(xhci, "// Ding dong!\n");
314 temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK; 312 xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
315 xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
316 /* Flush PCI posted writes */ 313 /* Flush PCI posted writes */
317 xhci_readl(xhci, &xhci->dba->doorbell[0]); 314 xhci_readl(xhci, &xhci->dba->doorbell[0]);
318} 315}
@@ -322,26 +319,24 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
322 unsigned int ep_index, 319 unsigned int ep_index,
323 unsigned int stream_id) 320 unsigned int stream_id)
324{ 321{
325 struct xhci_virt_ep *ep;
326 unsigned int ep_state;
327 u32 field;
328 __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id]; 322 __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
323 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
324 unsigned int ep_state = ep->ep_state;
329 325
330 ep = &xhci->devs[slot_id]->eps[ep_index];
331 ep_state = ep->ep_state;
332 /* Don't ring the doorbell for this endpoint if there are pending 326 /* Don't ring the doorbell for this endpoint if there are pending
333 * cancellations because the we don't want to interrupt processing. 327 * cancellations because we don't want to interrupt processing.
334 * We don't want to restart any stream rings if there's a set dequeue 328 * We don't want to restart any stream rings if there's a set dequeue
335 * pointer command pending because the device can choose to start any 329 * pointer command pending because the device can choose to start any
336 * stream once the endpoint is on the HW schedule. 330 * stream once the endpoint is on the HW schedule.
337 * FIXME - check all the stream rings for pending cancellations. 331 * FIXME - check all the stream rings for pending cancellations.
338 */ 332 */
339 if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING) 333 if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
340 && !(ep_state & EP_HALTED)) { 334 (ep_state & EP_HALTED))
341 field = xhci_readl(xhci, db_addr) & DB_MASK; 335 return;
342 field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id); 336 xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
343 xhci_writel(xhci, field, db_addr); 337 /* The CPU has better things to do at this point than wait for a
344 } 338 * write-posting flush. It'll get there soon enough.
339 */
345} 340}
346 341
347/* Ring the doorbell for any rings with pending URBs */ 342/* Ring the doorbell for any rings with pending URBs */
@@ -1188,7 +1183,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
1188 1183
1189 addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1); 1184 addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
1190 temp = xhci_readl(xhci, addr); 1185 temp = xhci_readl(xhci, addr);
1191 if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) { 1186 if (hcd->state == HC_STATE_SUSPENDED) {
1192 xhci_dbg(xhci, "resume root hub\n"); 1187 xhci_dbg(xhci, "resume root hub\n");
1193 usb_hcd_resume_root_hub(hcd); 1188 usb_hcd_resume_root_hub(hcd);
1194 } 1189 }
@@ -1710,8 +1705,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1710 /* Others already handled above */ 1705 /* Others already handled above */
1711 break; 1706 break;
1712 } 1707 }
1713 dev_dbg(&td->urb->dev->dev, 1708 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
1714 "ep %#x - asked for %d bytes, "
1715 "%d bytes untransferred\n", 1709 "%d bytes untransferred\n",
1716 td->urb->ep->desc.bEndpointAddress, 1710 td->urb->ep->desc.bEndpointAddress,
1717 td->urb->transfer_buffer_length, 1711 td->urb->transfer_buffer_length,
@@ -2389,7 +2383,8 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2389 } 2383 }
2390 xhci_dbg(xhci, "\n"); 2384 xhci_dbg(xhci, "\n");
2391 if (!in_interrupt()) 2385 if (!in_interrupt())
2392 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n", 2386 xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
2387 "num_trbs = %d\n",
2393 urb->ep->desc.bEndpointAddress, 2388 urb->ep->desc.bEndpointAddress,
2394 urb->transfer_buffer_length, 2389 urb->transfer_buffer_length,
2395 num_trbs); 2390 num_trbs);
@@ -2414,14 +2409,17 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2414 2409
2415static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 2410static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2416 unsigned int ep_index, unsigned int stream_id, int start_cycle, 2411 unsigned int ep_index, unsigned int stream_id, int start_cycle,
2417 struct xhci_generic_trb *start_trb, struct xhci_td *td) 2412 struct xhci_generic_trb *start_trb)
2418{ 2413{
2419 /* 2414 /*
2420 * Pass all the TRBs to the hardware at once and make sure this write 2415 * Pass all the TRBs to the hardware at once and make sure this write
2421 * isn't reordered. 2416 * isn't reordered.
2422 */ 2417 */
2423 wmb(); 2418 wmb();
2424 start_trb->field[3] |= start_cycle; 2419 if (start_cycle)
2420 start_trb->field[3] |= start_cycle;
2421 else
2422 start_trb->field[3] &= ~0x1;
2425 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 2423 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2426} 2424}
2427 2425
@@ -2449,7 +2447,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2449 * to set the polling interval (once the API is added). 2447 * to set the polling interval (once the API is added).
2450 */ 2448 */
2451 if (xhci_interval != ep_interval) { 2449 if (xhci_interval != ep_interval) {
2452 if (!printk_ratelimit()) 2450 if (printk_ratelimit())
2453 dev_dbg(&urb->dev->dev, "Driver uses different interval" 2451 dev_dbg(&urb->dev->dev, "Driver uses different interval"
2454 " (%d microframe%s) than xHCI " 2452 " (%d microframe%s) than xHCI "
2455 "(%d microframe%s)\n", 2453 "(%d microframe%s)\n",
@@ -2551,9 +2549,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2551 u32 remainder = 0; 2549 u32 remainder = 0;
2552 2550
2553 /* Don't change the cycle bit of the first TRB until later */ 2551 /* Don't change the cycle bit of the first TRB until later */
2554 if (first_trb) 2552 if (first_trb) {
2555 first_trb = false; 2553 first_trb = false;
2556 else 2554 if (start_cycle == 0)
2555 field |= 0x1;
2556 } else
2557 field |= ep_ring->cycle_state; 2557 field |= ep_ring->cycle_state;
2558 2558
2559 /* Chain all the TRBs together; clear the chain bit in the last 2559 /* Chain all the TRBs together; clear the chain bit in the last
@@ -2625,7 +2625,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2625 2625
2626 check_trb_math(urb, num_trbs, running_total); 2626 check_trb_math(urb, num_trbs, running_total);
2627 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2627 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2628 start_cycle, start_trb, td); 2628 start_cycle, start_trb);
2629 return 0; 2629 return 0;
2630} 2630}
2631 2631
@@ -2671,7 +2671,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2671 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ 2671 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
2672 2672
2673 if (!in_interrupt()) 2673 if (!in_interrupt())
2674 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n", 2674 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
2675 "addr = %#llx, num_trbs = %d\n",
2675 urb->ep->desc.bEndpointAddress, 2676 urb->ep->desc.bEndpointAddress,
2676 urb->transfer_buffer_length, 2677 urb->transfer_buffer_length,
2677 urb->transfer_buffer_length, 2678 urb->transfer_buffer_length,
@@ -2711,9 +2712,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2711 field = 0; 2712 field = 0;
2712 2713
2713 /* Don't change the cycle bit of the first TRB until later */ 2714 /* Don't change the cycle bit of the first TRB until later */
2714 if (first_trb) 2715 if (first_trb) {
2715 first_trb = false; 2716 first_trb = false;
2716 else 2717 if (start_cycle == 0)
2718 field |= 0x1;
2719 } else
2717 field |= ep_ring->cycle_state; 2720 field |= ep_ring->cycle_state;
2718 2721
2719 /* Chain all the TRBs together; clear the chain bit in the last 2722 /* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2760,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2757 2760
2758 check_trb_math(urb, num_trbs, running_total); 2761 check_trb_math(urb, num_trbs, running_total);
2759 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2762 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2760 start_cycle, start_trb, td); 2763 start_cycle, start_trb);
2761 return 0; 2764 return 0;
2762} 2765}
2763 2766
@@ -2818,13 +2821,17 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2818 /* Queue setup TRB - see section 6.4.1.2.1 */ 2821 /* Queue setup TRB - see section 6.4.1.2.1 */
2819 /* FIXME better way to translate setup_packet into two u32 fields? */ 2822 /* FIXME better way to translate setup_packet into two u32 fields? */
2820 setup = (struct usb_ctrlrequest *) urb->setup_packet; 2823 setup = (struct usb_ctrlrequest *) urb->setup_packet;
2824 field = 0;
2825 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
2826 if (start_cycle == 0)
2827 field |= 0x1;
2821 queue_trb(xhci, ep_ring, false, true, 2828 queue_trb(xhci, ep_ring, false, true,
2822 /* FIXME endianness is probably going to bite my ass here. */ 2829 /* FIXME endianness is probably going to bite my ass here. */
2823 setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, 2830 setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
2824 setup->wIndex | setup->wLength << 16, 2831 setup->wIndex | setup->wLength << 16,
2825 TRB_LEN(8) | TRB_INTR_TARGET(0), 2832 TRB_LEN(8) | TRB_INTR_TARGET(0),
2826 /* Immediate data in pointer */ 2833 /* Immediate data in pointer */
2827 TRB_IDT | TRB_TYPE(TRB_SETUP)); 2834 field);
2828 2835
2829 /* If there's data, queue data TRBs */ 2836 /* If there's data, queue data TRBs */
2830 field = 0; 2837 field = 0;
@@ -2859,7 +2866,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2859 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 2866 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
2860 2867
2861 giveback_first_trb(xhci, slot_id, ep_index, 0, 2868 giveback_first_trb(xhci, slot_id, ep_index, 0,
2862 start_cycle, start_trb, td); 2869 start_cycle, start_trb);
2863 return 0; 2870 return 0;
2864} 2871}
2865 2872
@@ -2900,6 +2907,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2900 int running_total, trb_buff_len, td_len, td_remain_len, ret; 2907 int running_total, trb_buff_len, td_len, td_remain_len, ret;
2901 u64 start_addr, addr; 2908 u64 start_addr, addr;
2902 int i, j; 2909 int i, j;
2910 bool more_trbs_coming;
2903 2911
2904 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 2912 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
2905 2913
@@ -2910,7 +2918,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2910 } 2918 }
2911 2919
2912 if (!in_interrupt()) 2920 if (!in_interrupt())
2913 dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d)," 2921 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
2914 " addr = %#llx, num_tds = %d\n", 2922 " addr = %#llx, num_tds = %d\n",
2915 urb->ep->desc.bEndpointAddress, 2923 urb->ep->desc.bEndpointAddress,
2916 urb->transfer_buffer_length, 2924 urb->transfer_buffer_length,
@@ -2950,7 +2958,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2950 field |= TRB_TYPE(TRB_ISOC); 2958 field |= TRB_TYPE(TRB_ISOC);
2951 /* Assume URB_ISO_ASAP is set */ 2959 /* Assume URB_ISO_ASAP is set */
2952 field |= TRB_SIA; 2960 field |= TRB_SIA;
2953 if (i > 0) 2961 if (i == 0) {
2962 if (start_cycle == 0)
2963 field |= 0x1;
2964 } else
2954 field |= ep_ring->cycle_state; 2965 field |= ep_ring->cycle_state;
2955 first_trb = false; 2966 first_trb = false;
2956 } else { 2967 } else {
@@ -2965,9 +2976,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2965 */ 2976 */
2966 if (j < trbs_per_td - 1) { 2977 if (j < trbs_per_td - 1) {
2967 field |= TRB_CHAIN; 2978 field |= TRB_CHAIN;
2979 more_trbs_coming = true;
2968 } else { 2980 } else {
2969 td->last_trb = ep_ring->enqueue; 2981 td->last_trb = ep_ring->enqueue;
2970 field |= TRB_IOC; 2982 field |= TRB_IOC;
2983 more_trbs_coming = false;
2971 } 2984 }
2972 2985
2973 /* Calculate TRB length */ 2986 /* Calculate TRB length */
@@ -2980,7 +2993,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2980 length_field = TRB_LEN(trb_buff_len) | 2993 length_field = TRB_LEN(trb_buff_len) |
2981 remainder | 2994 remainder |
2982 TRB_INTR_TARGET(0); 2995 TRB_INTR_TARGET(0);
2983 queue_trb(xhci, ep_ring, false, false, 2996 queue_trb(xhci, ep_ring, false, more_trbs_coming,
2984 lower_32_bits(addr), 2997 lower_32_bits(addr),
2985 upper_32_bits(addr), 2998 upper_32_bits(addr),
2986 length_field, 2999 length_field,
@@ -3003,10 +3016,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3003 } 3016 }
3004 } 3017 }
3005 3018
3006 wmb(); 3019 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3007 start_trb->field[3] |= start_cycle; 3020 start_cycle, start_trb);
3008
3009 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
3010 return 0; 3021 return 0;
3011} 3022}
3012 3023
@@ -3064,7 +3075,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3064 * to set the polling interval (once the API is added). 3075 * to set the polling interval (once the API is added).
3065 */ 3076 */
3066 if (xhci_interval != ep_interval) { 3077 if (xhci_interval != ep_interval) {
3067 if (!printk_ratelimit()) 3078 if (printk_ratelimit())
3068 dev_dbg(&urb->dev->dev, "Driver uses different interval" 3079 dev_dbg(&urb->dev->dev, "Driver uses different interval"
3069 " (%d microframe%s) than xHCI " 3080 " (%d microframe%s) than xHCI "
3070 "(%d microframe%s)\n", 3081 "(%d microframe%s)\n",
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45e4a3108cc3..34cf4e165877 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -226,7 +226,8 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
226static int xhci_setup_msix(struct xhci_hcd *xhci) 226static int xhci_setup_msix(struct xhci_hcd *xhci)
227{ 227{
228 int i, ret = 0; 228 int i, ret = 0;
229 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 229 struct usb_hcd *hcd = xhci_to_hcd(xhci);
230 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
230 231
231 /* 232 /*
232 * calculate number of msi-x vectors supported. 233 * calculate number of msi-x vectors supported.
@@ -265,6 +266,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
265 goto disable_msix; 266 goto disable_msix;
266 } 267 }
267 268
269 hcd->msix_enabled = 1;
268 return ret; 270 return ret;
269 271
270disable_msix: 272disable_msix:
@@ -280,7 +282,8 @@ free_entries:
280/* Free any IRQs and disable MSI-X */ 282/* Free any IRQs and disable MSI-X */
281static void xhci_cleanup_msix(struct xhci_hcd *xhci) 283static void xhci_cleanup_msix(struct xhci_hcd *xhci)
282{ 284{
283 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 285 struct usb_hcd *hcd = xhci_to_hcd(xhci);
286 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
284 287
285 xhci_free_irq(xhci); 288 xhci_free_irq(xhci);
286 289
@@ -292,6 +295,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
292 pci_disable_msi(pdev); 295 pci_disable_msi(pdev);
293 } 296 }
294 297
298 hcd->msix_enabled = 0;
295 return; 299 return;
296} 300}
297 301
@@ -508,9 +512,10 @@ void xhci_stop(struct usb_hcd *hcd)
508 spin_lock_irq(&xhci->lock); 512 spin_lock_irq(&xhci->lock);
509 xhci_halt(xhci); 513 xhci_halt(xhci);
510 xhci_reset(xhci); 514 xhci_reset(xhci);
511 xhci_cleanup_msix(xhci);
512 spin_unlock_irq(&xhci->lock); 515 spin_unlock_irq(&xhci->lock);
513 516
517 xhci_cleanup_msix(xhci);
518
514#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 519#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
515 /* Tell the event ring poll function not to reschedule */ 520 /* Tell the event ring poll function not to reschedule */
516 xhci->zombie = 1; 521 xhci->zombie = 1;
@@ -544,9 +549,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
544 549
545 spin_lock_irq(&xhci->lock); 550 spin_lock_irq(&xhci->lock);
546 xhci_halt(xhci); 551 xhci_halt(xhci);
547 xhci_cleanup_msix(xhci);
548 spin_unlock_irq(&xhci->lock); 552 spin_unlock_irq(&xhci->lock);
549 553
554 xhci_cleanup_msix(xhci);
555
550 xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n", 556 xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
551 xhci_readl(xhci, &xhci->op_regs->status)); 557 xhci_readl(xhci, &xhci->op_regs->status));
552} 558}
@@ -647,6 +653,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
647 int rc = 0; 653 int rc = 0;
648 struct usb_hcd *hcd = xhci_to_hcd(xhci); 654 struct usb_hcd *hcd = xhci_to_hcd(xhci);
649 u32 command; 655 u32 command;
656 int i;
650 657
651 spin_lock_irq(&xhci->lock); 658 spin_lock_irq(&xhci->lock);
652 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 659 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -677,10 +684,15 @@ int xhci_suspend(struct xhci_hcd *xhci)
677 spin_unlock_irq(&xhci->lock); 684 spin_unlock_irq(&xhci->lock);
678 return -ETIMEDOUT; 685 return -ETIMEDOUT;
679 } 686 }
680 /* step 5: remove core well power */
681 xhci_cleanup_msix(xhci);
682 spin_unlock_irq(&xhci->lock); 687 spin_unlock_irq(&xhci->lock);
683 688
689 /* step 5: remove core well power */
690 /* synchronize irq when using MSI-X */
691 if (xhci->msix_entries) {
692 for (i = 0; i < xhci->msix_count; i++)
693 synchronize_irq(xhci->msix_entries[i].vector);
694 }
695
684 return rc; 696 return rc;
685} 697}
686 698
@@ -694,7 +706,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
694{ 706{
695 u32 command, temp = 0; 707 u32 command, temp = 0;
696 struct usb_hcd *hcd = xhci_to_hcd(xhci); 708 struct usb_hcd *hcd = xhci_to_hcd(xhci);
697 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
698 int old_state, retval; 709 int old_state, retval;
699 710
700 old_state = hcd->state; 711 old_state = hcd->state;
@@ -729,9 +740,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
729 xhci_dbg(xhci, "Stop HCD\n"); 740 xhci_dbg(xhci, "Stop HCD\n");
730 xhci_halt(xhci); 741 xhci_halt(xhci);
731 xhci_reset(xhci); 742 xhci_reset(xhci);
732 if (hibernated)
733 xhci_cleanup_msix(xhci);
734 spin_unlock_irq(&xhci->lock); 743 spin_unlock_irq(&xhci->lock);
744 xhci_cleanup_msix(xhci);
735 745
736#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 746#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
737 /* Tell the event ring poll function not to reschedule */ 747 /* Tell the event ring poll function not to reschedule */
@@ -765,30 +775,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
765 return retval; 775 return retval;
766 } 776 }
767 777
768 spin_unlock_irq(&xhci->lock);
769 /* Re-setup MSI-X */
770 if (hcd->irq)
771 free_irq(hcd->irq, hcd);
772 hcd->irq = -1;
773
774 retval = xhci_setup_msix(xhci);
775 if (retval)
776 /* fall back to msi*/
777 retval = xhci_setup_msi(xhci);
778
779 if (retval) {
780 /* fall back to legacy interrupt*/
781 retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
782 hcd->irq_descr, hcd);
783 if (retval) {
784 xhci_err(xhci, "request interrupt %d failed\n",
785 pdev->irq);
786 return retval;
787 }
788 hcd->irq = pdev->irq;
789 }
790
791 spin_lock_irq(&xhci->lock);
792 /* step 4: set Run/Stop bit */ 778 /* step 4: set Run/Stop bit */
793 command = xhci_readl(xhci, &xhci->op_regs->command); 779 command = xhci_readl(xhci, &xhci->op_regs->command);
794 command |= CMD_RUN; 780 command |= CMD_RUN;
@@ -2445,8 +2431,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2445 xhci_err(xhci, "Error while assigning device slot ID\n"); 2431 xhci_err(xhci, "Error while assigning device slot ID\n");
2446 return 0; 2432 return 0;
2447 } 2433 }
2448 /* xhci_alloc_virt_device() does not touch rings; no need to lock */ 2434 /* xhci_alloc_virt_device() does not touch rings; no need to lock.
2449 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { 2435 * Use GFP_NOIO, since this function can be called from
2436 * xhci_discover_or_reset_device(), which may be called as part of
2437 * mass storage driver error handling.
2438 */
2439 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
2450 /* Disable slot, if we can do it without mem alloc */ 2440 /* Disable slot, if we can do it without mem alloc */
2451 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 2441 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
2452 spin_lock_irqsave(&xhci->lock, flags); 2442 spin_lock_irqsave(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 170c367112d2..7f236fd22015 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -436,22 +436,18 @@ struct xhci_run_regs {
436/** 436/**
437 * struct doorbell_array 437 * struct doorbell_array
438 * 438 *
439 * Bits 0 - 7: Endpoint target
440 * Bits 8 - 15: RsvdZ
441 * Bits 16 - 31: Stream ID
442 *
439 * Section 5.6 443 * Section 5.6
440 */ 444 */
441struct xhci_doorbell_array { 445struct xhci_doorbell_array {
442 u32 doorbell[256]; 446 u32 doorbell[256];
443}; 447};
444 448
445#define DB_TARGET_MASK 0xFFFFFF00 449#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
446#define DB_STREAM_ID_MASK 0x0000FFFF 450#define DB_VALUE_HOST 0x00000000
447#define DB_TARGET_HOST 0x0
448#define DB_STREAM_ID_HOST 0x0
449#define DB_MASK (0xff << 8)
450
451/* Endpoint Target - bits 0:7 */
452#define EPI_TO_DB(p) (((p) + 1) & 0xff)
453#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
454
455 451
456/** 452/**
457 * struct xhci_protocol_caps 453 * struct xhci_protocol_caps
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 1732d9bc097e..1616ad1793a4 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -45,7 +45,7 @@ struct usb_led {
45 45
46static void change_color(struct usb_led *led) 46static void change_color(struct usb_led *led)
47{ 47{
48 int retval; 48 int retval = 0;
49 unsigned char *buffer; 49 unsigned char *buffer;
50 50
51 buffer = kmalloc(8, GFP_KERNEL); 51 buffer = kmalloc(8, GFP_KERNEL);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 4ff21587ab03..f7a205738032 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -776,7 +776,6 @@ static const struct usb_device_id uss720_table[] = {
776 { USB_DEVICE(0x0557, 0x2001) }, 776 { USB_DEVICE(0x0557, 0x2001) },
777 { USB_DEVICE(0x0729, 0x1284) }, 777 { USB_DEVICE(0x0729, 0x1284) },
778 { USB_DEVICE(0x1293, 0x0002) }, 778 { USB_DEVICE(0x1293, 0x0002) },
779 { USB_DEVICE(0x1293, 0x0002) },
780 { USB_DEVICE(0x050d, 0x0002) }, 779 { USB_DEVICE(0x050d, 0x0002) },
781 { } /* Terminating entry */ 780 { } /* Terminating entry */
782}; 781};
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index eeba228eb2af..9d49d1cd7ce2 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb)
404 musb->xceiv->set_power = bfin_musb_set_power; 404 musb->xceiv->set_power = bfin_musb_set_power;
405 405
406 musb->isr = blackfin_interrupt; 406 musb->isr = blackfin_interrupt;
407 musb->double_buffer_not_ok = true;
407 408
408 return 0; 409 return 0;
409} 410}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 07cf394e491b..54a8bd1047d6 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
128 128
129static inline struct musb *dev_to_musb(struct device *dev) 129static inline struct musb *dev_to_musb(struct device *dev)
130{ 130{
131#ifdef CONFIG_USB_MUSB_HDRC_HCD
132 /* usbcore insists dev->driver_data is a "struct hcd *" */
133 return hcd_to_musb(dev_get_drvdata(dev));
134#else
135 return dev_get_drvdata(dev); 131 return dev_get_drvdata(dev);
136#endif
137} 132}
138 133
139/*-------------------------------------------------------------------------*/ 134/*-------------------------------------------------------------------------*/
@@ -1876,10 +1871,9 @@ allocate_instance(struct device *dev,
1876 musb = kzalloc(sizeof *musb, GFP_KERNEL); 1871 musb = kzalloc(sizeof *musb, GFP_KERNEL);
1877 if (!musb) 1872 if (!musb)
1878 return NULL; 1873 return NULL;
1879 dev_set_drvdata(dev, musb);
1880 1874
1881#endif 1875#endif
1882 1876 dev_set_drvdata(dev, musb);
1883 musb->mregs = mbase; 1877 musb->mregs = mbase;
1884 musb->ctrl_base = mbase; 1878 musb->ctrl_base = mbase;
1885 musb->nIrq = -ENODEV; 1879 musb->nIrq = -ENODEV;
@@ -2191,7 +2185,7 @@ static int __init musb_probe(struct platform_device *pdev)
2191 void __iomem *base; 2185 void __iomem *base;
2192 2186
2193 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2187 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2194 if (!iomem || irq == 0) 2188 if (!iomem || irq <= 0)
2195 return -ENODEV; 2189 return -ENODEV;
2196 2190
2197 base = ioremap(iomem->start, resource_size(iomem)); 2191 base = ioremap(iomem->start, resource_size(iomem));
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d0c236f8e191..d74a8113ae74 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -488,6 +488,18 @@ struct musb {
488 unsigned set_address:1; 488 unsigned set_address:1;
489 unsigned test_mode:1; 489 unsigned test_mode:1;
490 unsigned softconnect:1; 490 unsigned softconnect:1;
491 /*
492 * FIXME: Remove this flag.
493 *
494 * This is only added to allow Blackfin to work
495 * with current driver. For some unknown reason
496 * Blackfin doesn't work with double buffering
497 * and that's enabled by default.
498 *
499 * We added this flag to forcefully disable double
500 * buffering until we get it working.
501 */
502 unsigned double_buffer_not_ok:1 __deprecated;
491 503
492 u8 address; 504 u8 address;
493 u8 test_mode_nr; 505 u8 test_mode_nr;
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 916065ba9e70..3a97c4e2d4f5 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -169,6 +169,9 @@ struct dma_controller {
169 dma_addr_t dma_addr, 169 dma_addr_t dma_addr,
170 u32 length); 170 u32 length);
171 int (*channel_abort)(struct dma_channel *); 171 int (*channel_abort)(struct dma_channel *);
172 int (*is_compatible)(struct dma_channel *channel,
173 u16 maxpacket,
174 void *buf, u32 length);
172}; 175};
173 176
174/* called after channel_program(), may indicate a fault */ 177/* called after channel_program(), may indicate a fault */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ed58c6c8f15c..2fe304611dcf 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,33 @@
92 92
93/* ----------------------------------------------------------------------- */ 93/* ----------------------------------------------------------------------- */
94 94
95#define is_buffer_mapped(req) (is_dma_capable() && \
96 (req->map_state != UN_MAPPED))
97
95/* Maps the buffer to dma */ 98/* Maps the buffer to dma */
96 99
97static inline void map_dma_buffer(struct musb_request *request, 100static inline void map_dma_buffer(struct musb_request *request,
98 struct musb *musb) 101 struct musb *musb, struct musb_ep *musb_ep)
99{ 102{
103 int compatible = true;
104 struct dma_controller *dma = musb->dma_controller;
105
106 request->map_state = UN_MAPPED;
107
108 if (!is_dma_capable() || !musb_ep->dma)
109 return;
110
111 /* Check if DMA engine can handle this request.
112 * DMA code must reject the USB request explicitly.
113 * Default behaviour is to map the request.
114 */
115 if (dma->is_compatible)
116 compatible = dma->is_compatible(musb_ep->dma,
117 musb_ep->packet_sz, request->request.buf,
118 request->request.length);
119 if (!compatible)
120 return;
121
100 if (request->request.dma == DMA_ADDR_INVALID) { 122 if (request->request.dma == DMA_ADDR_INVALID) {
101 request->request.dma = dma_map_single( 123 request->request.dma = dma_map_single(
102 musb->controller, 124 musb->controller,
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request,
105 request->tx 127 request->tx
106 ? DMA_TO_DEVICE 128 ? DMA_TO_DEVICE
107 : DMA_FROM_DEVICE); 129 : DMA_FROM_DEVICE);
108 request->mapped = 1; 130 request->map_state = MUSB_MAPPED;
109 } else { 131 } else {
110 dma_sync_single_for_device(musb->controller, 132 dma_sync_single_for_device(musb->controller,
111 request->request.dma, 133 request->request.dma,
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request,
113 request->tx 135 request->tx
114 ? DMA_TO_DEVICE 136 ? DMA_TO_DEVICE
115 : DMA_FROM_DEVICE); 137 : DMA_FROM_DEVICE);
116 request->mapped = 0; 138 request->map_state = PRE_MAPPED;
117 } 139 }
118} 140}
119 141
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request,
121static inline void unmap_dma_buffer(struct musb_request *request, 143static inline void unmap_dma_buffer(struct musb_request *request,
122 struct musb *musb) 144 struct musb *musb)
123{ 145{
146 if (!is_buffer_mapped(request))
147 return;
148
124 if (request->request.dma == DMA_ADDR_INVALID) { 149 if (request->request.dma == DMA_ADDR_INVALID) {
125 DBG(20, "not unmapping a never mapped buffer\n"); 150 DBG(20, "not unmapping a never mapped buffer\n");
126 return; 151 return;
127 } 152 }
128 if (request->mapped) { 153 if (request->map_state == MUSB_MAPPED) {
129 dma_unmap_single(musb->controller, 154 dma_unmap_single(musb->controller,
130 request->request.dma, 155 request->request.dma,
131 request->request.length, 156 request->request.length,
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
133 ? DMA_TO_DEVICE 158 ? DMA_TO_DEVICE
134 : DMA_FROM_DEVICE); 159 : DMA_FROM_DEVICE);
135 request->request.dma = DMA_ADDR_INVALID; 160 request->request.dma = DMA_ADDR_INVALID;
136 request->mapped = 0; 161 } else { /* PRE_MAPPED */
137 } else {
138 dma_sync_single_for_cpu(musb->controller, 162 dma_sync_single_for_cpu(musb->controller,
139 request->request.dma, 163 request->request.dma,
140 request->request.length, 164 request->request.length,
141 request->tx 165 request->tx
142 ? DMA_TO_DEVICE 166 ? DMA_TO_DEVICE
143 : DMA_FROM_DEVICE); 167 : DMA_FROM_DEVICE);
144
145 } 168 }
169 request->map_state = UN_MAPPED;
146} 170}
147 171
148/* 172/*
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock)
172 196
173 ep->busy = 1; 197 ep->busy = 1;
174 spin_unlock(&musb->lock); 198 spin_unlock(&musb->lock);
175 if (is_dma_capable() && ep->dma) 199 unmap_dma_buffer(req, musb);
176 unmap_dma_buffer(req, musb);
177 if (request->status == 0) 200 if (request->status == 0)
178 DBG(5, "%s done request %p, %d/%d\n", 201 DBG(5, "%s done request %p, %d/%d\n",
179 ep->end_point.name, request, 202 ep->end_point.name, request,
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
335 csr); 358 csr);
336 359
337#ifndef CONFIG_MUSB_PIO_ONLY 360#ifndef CONFIG_MUSB_PIO_ONLY
338 if (is_dma_capable() && musb_ep->dma) { 361 if (is_buffer_mapped(req)) {
339 struct dma_controller *c = musb->dma_controller; 362 struct dma_controller *c = musb->dma_controller;
340 size_t request_size; 363 size_t request_size;
341 364
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
436 * Unmap the dma buffer back to cpu if dma channel 459 * Unmap the dma buffer back to cpu if dma channel
437 * programming fails 460 * programming fails
438 */ 461 */
439 if (is_dma_capable() && musb_ep->dma) 462 unmap_dma_buffer(req, musb);
440 unmap_dma_buffer(req, musb);
441 463
442 musb_write_fifo(musb_ep->hw_ep, fifo_count, 464 musb_write_fifo(musb_ep->hw_ep, fifo_count,
443 (u8 *) (request->buf + request->actual)); 465 (u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
627 return; 649 return;
628 } 650 }
629 651
630 if (is_cppi_enabled() && musb_ep->dma) { 652 if (is_cppi_enabled() && is_buffer_mapped(req)) {
631 struct dma_controller *c = musb->dma_controller; 653 struct dma_controller *c = musb->dma_controller;
632 struct dma_channel *channel = musb_ep->dma; 654 struct dma_channel *channel = musb_ep->dma;
633 655
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
658 len = musb_readw(epio, MUSB_RXCOUNT); 680 len = musb_readw(epio, MUSB_RXCOUNT);
659 if (request->actual < request->length) { 681 if (request->actual < request->length) {
660#ifdef CONFIG_USB_INVENTRA_DMA 682#ifdef CONFIG_USB_INVENTRA_DMA
661 if (is_dma_capable() && musb_ep->dma) { 683 if (is_buffer_mapped(req)) {
662 struct dma_controller *c; 684 struct dma_controller *c;
663 struct dma_channel *channel; 685 struct dma_channel *channel;
664 int use_dma = 0; 686 int use_dma = 0;
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
742 fifo_count = min_t(unsigned, len, fifo_count); 764 fifo_count = min_t(unsigned, len, fifo_count);
743 765
744#ifdef CONFIG_USB_TUSB_OMAP_DMA 766#ifdef CONFIG_USB_TUSB_OMAP_DMA
745 if (tusb_dma_omap() && musb_ep->dma) { 767 if (tusb_dma_omap() && is_buffer_mapped(req)) {
746 struct dma_controller *c = musb->dma_controller; 768 struct dma_controller *c = musb->dma_controller;
747 struct dma_channel *channel = musb_ep->dma; 769 struct dma_channel *channel = musb_ep->dma;
748 u32 dma_addr = request->dma + request->actual; 770 u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
762 * programming fails. This buffer is mapped if the 784 * programming fails. This buffer is mapped if the
763 * channel allocation is successful 785 * channel allocation is successful
764 */ 786 */
765 if (is_dma_capable() && musb_ep->dma) { 787 if (is_buffer_mapped(req)) {
766 unmap_dma_buffer(req, musb); 788 unmap_dma_buffer(req, musb);
767 789
768 /* 790 /*
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
989 /* Set TXMAXP with the FIFO size of the endpoint 1011 /* Set TXMAXP with the FIFO size of the endpoint
990 * to disable double buffering mode. 1012 * to disable double buffering mode.
991 */ 1013 */
992 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); 1014 if (musb->double_buffer_not_ok)
1015 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1016 else
1017 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1018 | (musb_ep->hb_mult << 11));
993 1019
994 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 1020 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
995 if (musb_readw(regs, MUSB_TXCSR) 1021 if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
1025 /* Set RXMAXP with the FIFO size of the endpoint 1051 /* Set RXMAXP with the FIFO size of the endpoint
1026 * to disable double buffering mode. 1052 * to disable double buffering mode.
1027 */ 1053 */
1028 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); 1054 if (musb->double_buffer_not_ok)
1055 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
1056 else
1057 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1058 | (musb_ep->hb_mult << 11));
1029 1059
1030 /* force shared fifo to OUT-only mode */ 1060 /* force shared fifo to OUT-only mode */
1031 if (hw_ep->is_shared_fifo) { 1061 if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1214 request->epnum = musb_ep->current_epnum; 1244 request->epnum = musb_ep->current_epnum;
1215 request->tx = musb_ep->is_in; 1245 request->tx = musb_ep->is_in;
1216 1246
1217 if (is_dma_capable() && musb_ep->dma) 1247 map_dma_buffer(request, musb, musb_ep);
1218 map_dma_buffer(request, musb);
1219 else
1220 request->mapped = 0;
1221 1248
1222 spin_lock_irqsave(&musb->lock, lockflags); 1249 spin_lock_irqsave(&musb->lock, lockflags);
1223 1250
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index dec8dc008191..a55354fbccf5 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
35#ifndef __MUSB_GADGET_H 35#ifndef __MUSB_GADGET_H
36#define __MUSB_GADGET_H 36#define __MUSB_GADGET_H
37 37
38enum buffer_map_state {
39 UN_MAPPED = 0,
40 PRE_MAPPED,
41 MUSB_MAPPED
42};
43
38struct musb_request { 44struct musb_request {
39 struct usb_request request; 45 struct usb_request request;
40 struct musb_ep *ep; 46 struct musb_ep *ep;
41 struct musb *musb; 47 struct musb *musb;
42 u8 tx; /* endpoint direction */ 48 u8 tx; /* endpoint direction */
43 u8 epnum; 49 u8 epnum;
44 u8 mapped; 50 enum buffer_map_state map_state;
45}; 51};
46 52
47static inline struct musb_request *to_musb_request(struct usb_request *req) 53static inline struct musb_request *to_musb_request(struct usb_request *req)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4d5bcb4e14d2..0f523d7db57b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
609 /* Set RXMAXP with the FIFO size of the endpoint 609 /* Set RXMAXP with the FIFO size of the endpoint
610 * to disable double buffer mode. 610 * to disable double buffer mode.
611 */ 611 */
612 if (musb->hwvers < MUSB_HWVERS_2000) 612 if (musb->double_buffer_not_ok)
613 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); 613 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
614 else 614 else
615 musb_writew(ep->regs, MUSB_RXMAXP, 615 musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
784 /* protocol/endpoint/interval/NAKlimit */ 784 /* protocol/endpoint/interval/NAKlimit */
785 if (epnum) { 785 if (epnum) {
786 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); 786 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
787 if (can_bulk_split(musb, qh->type)) 787 if (musb->double_buffer_not_ok)
788 musb_writew(epio, MUSB_TXMAXP, 788 musb_writew(epio, MUSB_TXMAXP,
789 packet_sz 789 hw_ep->max_packet_sz_tx);
790 | ((hw_ep->max_packet_sz_tx /
791 packet_sz) - 1) << 11);
792 else 790 else
793 musb_writew(epio, MUSB_TXMAXP, 791 musb_writew(epio, MUSB_TXMAXP,
794 packet_sz); 792 qh->maxpacket |
793 ((qh->hb_mult - 1) << 11));
795 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); 794 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
796 } else { 795 } else {
797 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); 796 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f763d62f151c..21056c924c74 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
94{ 94{
95 musb_writew(mbase, 95 musb_writew(mbase,
96 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW), 96 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
97 ((u16)((u32) dma_addr & 0xFFFF))); 97 dma_addr);
98 musb_writew(mbase, 98 musb_writew(mbase,
99 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH), 99 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
100 ((u16)(((u32) dma_addr >> 16) & 0xFFFF))); 100 (dma_addr >> 16));
101} 101}
102 102
103static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel) 103static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
104{ 104{
105 return musb_readl(mbase, 105 u32 count = musb_readw(mbase,
106 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH)); 106 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
107
108 count = count << 16;
109
110 count |= musb_readw(mbase,
111 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
112
113 return count;
107} 114}
108 115
109static inline void musb_write_hsdma_count(void __iomem *mbase, 116static inline void musb_write_hsdma_count(void __iomem *mbase,
110 u8 bchannel, u32 len) 117 u8 bchannel, u32 len)
111{ 118{
112 musb_writel(mbase, 119 musb_writew(mbase,
120 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),len);
121 musb_writew(mbase,
113 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH), 122 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
114 len); 123 (len >> 16));
115} 124}
116 125
117#endif /* CONFIG_BLACKFIN */ 126#endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 9fb875d5f09c..9ffc8237fb4b 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -103,6 +103,8 @@ config USB_MSM_OTG_72K
103 required after resetting the hardware and power management. 103 required after resetting the hardware and power management.
104 This driver is required even for peripheral only or host only 104 This driver is required even for peripheral only or host only
105 mode configurations. 105 mode configurations.
106 This driver is not supported on boards like trout which
107 has an external PHY.
106 108
107config AB8500_USB 109config AB8500_USB
108 tristate "AB8500 USB Transceiver Driver" 110 tristate "AB8500 USB Transceiver Driver"
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index e70014ab0976..8acf165fe13b 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -132,6 +132,8 @@ static int __devinit nop_usb_xceiv_probe(struct platform_device *pdev)
132 132
133 platform_set_drvdata(pdev, nop); 133 platform_set_drvdata(pdev, nop);
134 134
135 BLOCKING_INIT_NOTIFIER_HEAD(&nop->otg.notifier);
136
135 return 0; 137 return 0;
136exit: 138exit:
137 kfree(nop); 139 kfree(nop);
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index 059d9ac0ab5b..770d799d5afb 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -45,7 +45,7 @@ struct ulpi_info {
45/* ULPI hardcoded IDs, used for probing */ 45/* ULPI hardcoded IDs, used for probing */
46static struct ulpi_info ulpi_ids[] = { 46static struct ulpi_info ulpi_ids[] = {
47 ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"), 47 ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
48 ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB3319"), 48 ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
49}; 49};
50 50
51static int ulpi_set_otg_flags(struct otg_transceiver *otg) 51static int ulpi_set_otg_flags(struct otg_transceiver *otg)
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 63f7cc45bcac..7b8815ddf368 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -486,12 +486,22 @@ static void ch341_read_int_callback(struct urb *urb)
486 if (actual_length >= 4) { 486 if (actual_length >= 4) {
487 struct ch341_private *priv = usb_get_serial_port_data(port); 487 struct ch341_private *priv = usb_get_serial_port_data(port);
488 unsigned long flags; 488 unsigned long flags;
489 u8 prev_line_status = priv->line_status;
489 490
490 spin_lock_irqsave(&priv->lock, flags); 491 spin_lock_irqsave(&priv->lock, flags);
491 priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT; 492 priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
492 if ((data[1] & CH341_MULT_STAT)) 493 if ((data[1] & CH341_MULT_STAT))
493 priv->multi_status_change = 1; 494 priv->multi_status_change = 1;
494 spin_unlock_irqrestore(&priv->lock, flags); 495 spin_unlock_irqrestore(&priv->lock, flags);
496
497 if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
498 struct tty_struct *tty = tty_port_tty_get(&port->port);
499 if (tty)
500 usb_serial_handle_dcd_change(port, tty,
501 priv->line_status & CH341_BIT_DCD);
502 tty_kref_put(tty);
503 }
504
495 wake_up_interruptible(&priv->delta_msr_wait); 505 wake_up_interruptible(&priv->delta_msr_wait);
496 } 506 }
497 507
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8d7731dbf478..735ea03157ab 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -49,7 +49,6 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
49static void cp210x_break_ctl(struct tty_struct *, int); 49static void cp210x_break_ctl(struct tty_struct *, int);
50static int cp210x_startup(struct usb_serial *); 50static int cp210x_startup(struct usb_serial *);
51static void cp210x_dtr_rts(struct usb_serial_port *p, int on); 51static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
52static int cp210x_carrier_raised(struct usb_serial_port *p);
53 52
54static int debug; 53static int debug;
55 54
@@ -87,7 +86,6 @@ static const struct usb_device_id id_table[] = {
87 { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ 86 { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
88 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ 87 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
89 { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ 88 { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
90 { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
91 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ 89 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
92 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ 90 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
93 { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ 91 { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
@@ -110,7 +108,9 @@ static const struct usb_device_id id_table[] = {
110 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 108 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
111 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ 109 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
112 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ 110 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
111 { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
113 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ 112 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
113 { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
114 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ 114 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
115 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ 115 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
116 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 116 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -165,8 +165,7 @@ static struct usb_serial_driver cp210x_device = {
165 .tiocmget = cp210x_tiocmget, 165 .tiocmget = cp210x_tiocmget,
166 .tiocmset = cp210x_tiocmset, 166 .tiocmset = cp210x_tiocmset,
167 .attach = cp210x_startup, 167 .attach = cp210x_startup,
168 .dtr_rts = cp210x_dtr_rts, 168 .dtr_rts = cp210x_dtr_rts
169 .carrier_raised = cp210x_carrier_raised
170}; 169};
171 170
172/* Config request types */ 171/* Config request types */
@@ -765,15 +764,6 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
765 return result; 764 return result;
766} 765}
767 766
768static int cp210x_carrier_raised(struct usb_serial_port *p)
769{
770 unsigned int control;
771 cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
772 if (control & CONTROL_DCD)
773 return 1;
774 return 0;
775}
776
777static void cp210x_break_ctl (struct tty_struct *tty, int break_state) 767static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
778{ 768{
779 struct usb_serial_port *port = tty->driver_data; 769 struct usb_serial_port *port = tty->driver_data;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index b92070c103cd..666e5a6edd82 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -455,7 +455,6 @@ static int digi_write_room(struct tty_struct *tty);
455static int digi_chars_in_buffer(struct tty_struct *tty); 455static int digi_chars_in_buffer(struct tty_struct *tty);
456static int digi_open(struct tty_struct *tty, struct usb_serial_port *port); 456static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
457static void digi_close(struct usb_serial_port *port); 457static void digi_close(struct usb_serial_port *port);
458static int digi_carrier_raised(struct usb_serial_port *port);
459static void digi_dtr_rts(struct usb_serial_port *port, int on); 458static void digi_dtr_rts(struct usb_serial_port *port, int on);
460static int digi_startup_device(struct usb_serial *serial); 459static int digi_startup_device(struct usb_serial *serial);
461static int digi_startup(struct usb_serial *serial); 460static int digi_startup(struct usb_serial *serial);
@@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acceleport_2_device = {
511 .open = digi_open, 510 .open = digi_open,
512 .close = digi_close, 511 .close = digi_close,
513 .dtr_rts = digi_dtr_rts, 512 .dtr_rts = digi_dtr_rts,
514 .carrier_raised = digi_carrier_raised,
515 .write = digi_write, 513 .write = digi_write,
516 .write_room = digi_write_room, 514 .write_room = digi_write_room,
517 .write_bulk_callback = digi_write_bulk_callback, 515 .write_bulk_callback = digi_write_bulk_callback,
@@ -1339,14 +1337,6 @@ static void digi_dtr_rts(struct usb_serial_port *port, int on)
1339 digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1); 1337 digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
1340} 1338}
1341 1339
1342static int digi_carrier_raised(struct usb_serial_port *port)
1343{
1344 struct digi_port *priv = usb_get_serial_port_data(port);
1345 if (priv->dp_modem_signals & TIOCM_CD)
1346 return 1;
1347 return 0;
1348}
1349
1350static int digi_open(struct tty_struct *tty, struct usb_serial_port *port) 1340static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
1351{ 1341{
1352 int ret; 1342 int ret;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a2668d089260..f349a3629d00 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -100,6 +100,7 @@ struct ftdi_sio_quirk {
100static int ftdi_jtag_probe(struct usb_serial *serial); 100static int ftdi_jtag_probe(struct usb_serial *serial);
101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); 101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
102static int ftdi_NDI_device_setup(struct usb_serial *serial); 102static int ftdi_NDI_device_setup(struct usb_serial *serial);
103static int ftdi_stmclite_probe(struct usb_serial *serial);
103static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); 104static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
104static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); 105static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
105 106
@@ -123,6 +124,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
123 .port_probe = ftdi_HE_TIRA1_setup, 124 .port_probe = ftdi_HE_TIRA1_setup,
124}; 125};
125 126
127static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
128 .probe = ftdi_stmclite_probe,
129};
130
126/* 131/*
127 * The 8U232AM has the same API as the sio except for: 132 * The 8U232AM has the same API as the sio except for:
128 * - it can support MUCH higher baudrates; up to: 133 * - it can support MUCH higher baudrates; up to:
@@ -616,6 +621,7 @@ static struct usb_device_id id_table_combined [] = {
616 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, 621 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
617 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, 622 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
618 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, 623 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
624 { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
619 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, 625 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
620 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, 626 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
621 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, 627 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
@@ -676,7 +682,17 @@ static struct usb_device_id id_table_combined [] = {
676 { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, 682 { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
677 { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, 683 { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
678 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, 684 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
679 { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) }, 685 { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
686 { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
687 { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
688 { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
689 { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
690 { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
691 { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
692 { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
693 { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
694 { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
695 { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
680 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, 696 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
681 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, 697 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
682 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, 698 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -800,6 +816,8 @@ static struct usb_device_id id_table_combined [] = {
800 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, 816 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
801 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), 817 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
802 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 818 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
819 { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
820 .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
803 { }, /* Optional parameter entry */ 821 { }, /* Optional parameter entry */
804 { } /* Terminating entry */ 822 { } /* Terminating entry */
805}; 823};
@@ -1699,6 +1717,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
1699} 1717}
1700 1718
1701/* 1719/*
1720 * First and second port on STMCLiteadaptors is reserved for JTAG interface
1721 * and the forth port for pio
1722 */
1723static int ftdi_stmclite_probe(struct usb_serial *serial)
1724{
1725 struct usb_device *udev = serial->dev;
1726 struct usb_interface *interface = serial->interface;
1727
1728 dbg("%s", __func__);
1729
1730 if (interface == udev->actconfig->interface[2])
1731 return 0;
1732
1733 dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
1734
1735 return -ENODEV;
1736}
1737
1738/*
1702 * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. 1739 * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
1703 * We have to correct it if we want to read from it. 1740 * We have to correct it if we want to read from it.
1704 */ 1741 */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index bf0867285481..117e8e6f93c6 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -518,6 +518,12 @@
518#define RATOC_PRODUCT_ID_USB60F 0xb020 518#define RATOC_PRODUCT_ID_USB60F 0xb020
519 519
520/* 520/*
521 * Acton Research Corp.
522 */
523#define ACTON_VID 0x0647 /* Vendor ID */
524#define ACTON_SPECTRAPRO_PID 0x0100
525
526/*
521 * Contec products (http://www.contec.com) 527 * Contec products (http://www.contec.com)
522 * Submitted by Daniel Sangorrin 528 * Submitted by Daniel Sangorrin
523 */ 529 */
@@ -569,11 +575,23 @@
569#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ 575#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
570 576
571/* 577/*
572 * Icom ID-1 digital transceiver 578 * Definitions for Icom Inc. devices
573 */ 579 */
574 580#define ICOM_VID 0x0C26 /* Icom vendor ID */
575#define ICOM_ID1_VID 0x0C26 581/* Note: ID-1 is a communications tranceiver for HAM-radio operators */
576#define ICOM_ID1_PID 0x0004 582#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */
583/* Note: OPC is an Optional cable to connect an Icom Tranceiver */
584#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */
585/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
586#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */
587#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */
588#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port*/
589#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */
590#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */
591#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */
592#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */
593#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */
594#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */
577 595
578/* 596/*
579 * GN Otometrics (http://www.otometrics.com) 597 * GN Otometrics (http://www.otometrics.com)
@@ -1022,6 +1040,12 @@
1022#define WHT_PID 0x0004 /* Wireless Handheld Terminal */ 1040#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
1023 1041
1024/* 1042/*
1043 * STMicroelectonics
1044 */
1045#define ST_VID 0x0483
1046#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
1047
1048/*
1025 * Papouch products (http://www.papouch.com/) 1049 * Papouch products (http://www.papouch.com/)
1026 * Submitted by Folkert van Heusden 1050 * Submitted by Folkert van Heusden
1027 */ 1051 */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index e6833e216fc9..e4db5ad2bc55 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -479,6 +479,26 @@ int usb_serial_handle_break(struct usb_serial_port *port)
479} 479}
480EXPORT_SYMBOL_GPL(usb_serial_handle_break); 480EXPORT_SYMBOL_GPL(usb_serial_handle_break);
481 481
482/**
483 * usb_serial_handle_dcd_change - handle a change of carrier detect state
484 * @port: usb_serial_port structure for the open port
485 * @tty: tty_struct structure for the port
486 * @status: new carrier detect status, nonzero if active
487 */
488void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
489 struct tty_struct *tty, unsigned int status)
490{
491 struct tty_port *port = &usb_port->port;
492
493 dbg("%s - port %d, status %d", __func__, usb_port->number, status);
494
495 if (status)
496 wake_up_interruptible(&port->open_wait);
497 else if (tty && !C_CLOCAL(tty))
498 tty_hangup(tty);
499}
500EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
501
482int usb_serial_generic_resume(struct usb_serial *serial) 502int usb_serial_generic_resume(struct usb_serial *serial)
483{ 503{
484 struct usb_serial_port *port; 504 struct usb_serial_port *port;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index cd769ef24f8a..3b246d93cf22 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2889,8 +2889,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
2889 2889
2890 dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build); 2890 dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
2891 2891
2892 edge_serial->product_info.FirmwareMajorVersion = fw->data[0]; 2892 edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
2893 edge_serial->product_info.FirmwareMinorVersion = fw->data[1]; 2893 edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
2894 edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build); 2894 edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
2895 2895
2896 for (rec = ihex_next_binrec(rec); rec; 2896 for (rec = ihex_next_binrec(rec); rec;
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 6ab2a3f97fe8..178b22eb32b1 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -199,6 +199,7 @@ static struct usb_serial_driver epic_device = {
199 .name = "epic", 199 .name = "epic",
200 }, 200 },
201 .description = "EPiC device", 201 .description = "EPiC device",
202 .usb_driver = &io_driver,
202 .id_table = Epic_port_id_table, 203 .id_table = Epic_port_id_table,
203 .num_ports = 1, 204 .num_ports = 1,
204 .open = edge_open, 205 .open = edge_open,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 12ed594f5f80..99b97c04896f 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1275,6 +1275,7 @@ static struct usb_serial_driver iuu_device = {
1275 .name = "iuu_phoenix", 1275 .name = "iuu_phoenix",
1276 }, 1276 },
1277 .id_table = id_table, 1277 .id_table = id_table,
1278 .usb_driver = &iuu_driver,
1278 .num_ports = 1, 1279 .num_ports = 1,
1279 .bulk_in_size = 512, 1280 .bulk_in_size = 512,
1280 .bulk_out_size = 512, 1281 .bulk_out_size = 512,
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 2d8baf6ac472..ce134dc28ddf 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -546,6 +546,7 @@ static struct usb_serial_driver keyspan_pre_device = {
546 .name = "keyspan_no_firm", 546 .name = "keyspan_no_firm",
547 }, 547 },
548 .description = "Keyspan - (without firmware)", 548 .description = "Keyspan - (without firmware)",
549 .usb_driver = &keyspan_driver,
549 .id_table = keyspan_pre_ids, 550 .id_table = keyspan_pre_ids,
550 .num_ports = 1, 551 .num_ports = 1,
551 .attach = keyspan_fake_startup, 552 .attach = keyspan_fake_startup,
@@ -557,6 +558,7 @@ static struct usb_serial_driver keyspan_1port_device = {
557 .name = "keyspan_1", 558 .name = "keyspan_1",
558 }, 559 },
559 .description = "Keyspan 1 port adapter", 560 .description = "Keyspan 1 port adapter",
561 .usb_driver = &keyspan_driver,
560 .id_table = keyspan_1port_ids, 562 .id_table = keyspan_1port_ids,
561 .num_ports = 1, 563 .num_ports = 1,
562 .open = keyspan_open, 564 .open = keyspan_open,
@@ -579,6 +581,7 @@ static struct usb_serial_driver keyspan_2port_device = {
579 .name = "keyspan_2", 581 .name = "keyspan_2",
580 }, 582 },
581 .description = "Keyspan 2 port adapter", 583 .description = "Keyspan 2 port adapter",
584 .usb_driver = &keyspan_driver,
582 .id_table = keyspan_2port_ids, 585 .id_table = keyspan_2port_ids,
583 .num_ports = 2, 586 .num_ports = 2,
584 .open = keyspan_open, 587 .open = keyspan_open,
@@ -601,6 +604,7 @@ static struct usb_serial_driver keyspan_4port_device = {
601 .name = "keyspan_4", 604 .name = "keyspan_4",
602 }, 605 },
603 .description = "Keyspan 4 port adapter", 606 .description = "Keyspan 4 port adapter",
607 .usb_driver = &keyspan_driver,
604 .id_table = keyspan_4port_ids, 608 .id_table = keyspan_4port_ids,
605 .num_ports = 4, 609 .num_ports = 4,
606 .open = keyspan_open, 610 .open = keyspan_open,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index a10dd5676ccc..554a8693a463 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -679,22 +679,6 @@ static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
679 } 679 }
680} 680}
681 681
682static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
683{
684 struct usb_serial *serial = port->serial;
685 unsigned char modembits;
686
687 /* If we can read the modem status and the DCD is low then
688 carrier is not raised yet */
689 if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
690 if (!(modembits & (1>>6)))
691 return 0;
692 }
693 /* Carrier raised, or we failed (eg disconnected) so
694 progress accordingly */
695 return 1;
696}
697
698 682
699static int keyspan_pda_open(struct tty_struct *tty, 683static int keyspan_pda_open(struct tty_struct *tty,
700 struct usb_serial_port *port) 684 struct usb_serial_port *port)
@@ -881,7 +865,6 @@ static struct usb_serial_driver keyspan_pda_device = {
881 .id_table = id_table_std, 865 .id_table = id_table_std,
882 .num_ports = 1, 866 .num_ports = 1,
883 .dtr_rts = keyspan_pda_dtr_rts, 867 .dtr_rts = keyspan_pda_dtr_rts,
884 .carrier_raised = keyspan_pda_carrier_raised,
885 .open = keyspan_pda_open, 868 .open = keyspan_pda_open,
886 .close = keyspan_pda_close, 869 .close = keyspan_pda_close,
887 .write = keyspan_pda_write, 870 .write = keyspan_pda_write,
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index cf1718394e18..653465f61d4a 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -44,6 +44,7 @@ static struct usb_serial_driver moto_device = {
44 .name = "moto-modem", 44 .name = "moto-modem",
45 }, 45 },
46 .id_table = id_table, 46 .id_table = id_table,
47 .usb_driver = &moto_driver,
47 .num_ports = 1, 48 .num_ports = 1,
48}; 49};
49 50
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 748778288d94..5f46838dfee5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -382,7 +382,16 @@ static void option_instat_callback(struct urb *urb);
382#define HAIER_VENDOR_ID 0x201e 382#define HAIER_VENDOR_ID 0x201e
383#define HAIER_PRODUCT_CE100 0x2009 383#define HAIER_PRODUCT_CE100 0x2009
384 384
385#define CINTERION_VENDOR_ID 0x0681 385/* Cinterion (formerly Siemens) products */
386#define SIEMENS_VENDOR_ID 0x0681
387#define CINTERION_VENDOR_ID 0x1e2d
388#define CINTERION_PRODUCT_HC25_MDM 0x0047
389#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
390#define CINTERION_PRODUCT_HC28_MDM 0x004C
391#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
392#define CINTERION_PRODUCT_EU3_E 0x0051
393#define CINTERION_PRODUCT_EU3_P 0x0052
394#define CINTERION_PRODUCT_PH8 0x0053
386 395
387/* Olivetti products */ 396/* Olivetti products */
388#define OLIVETTI_VENDOR_ID 0x0b3c 397#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -944,7 +953,17 @@ static const struct usb_device_id option_ids[] = {
944 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, 953 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
945 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, 954 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
946 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, 955 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
947 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, 956 /* Cinterion */
957 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
958 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
959 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
960 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
961 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
962 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
963 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
964 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
965 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
966
948 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 967 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
949 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 968 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
950 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ 969 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 5be866bb7a41..73613205be7a 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -157,6 +157,7 @@ static struct usb_serial_driver oti6858_device = {
157 .name = "oti6858", 157 .name = "oti6858",
158 }, 158 },
159 .id_table = id_table, 159 .id_table = id_table,
160 .usb_driver = &oti6858_driver,
160 .num_ports = 1, 161 .num_ports = 1,
161 .open = oti6858_open, 162 .open = oti6858_open,
162 .close = oti6858_close, 163 .close = oti6858_close,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 8ae4c6cbc38a..08c9181b8e48 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,6 +50,7 @@ static const struct usb_device_id id_table[] = {
50 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, 50 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
51 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, 51 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
52 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, 52 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
53 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
53 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, 54 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
54 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 55 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
55 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, 56 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
@@ -677,9 +678,11 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
677{ 678{
678 679
679 struct pl2303_private *priv = usb_get_serial_port_data(port); 680 struct pl2303_private *priv = usb_get_serial_port_data(port);
681 struct tty_struct *tty;
680 unsigned long flags; 682 unsigned long flags;
681 u8 status_idx = UART_STATE; 683 u8 status_idx = UART_STATE;
682 u8 length = UART_STATE + 1; 684 u8 length = UART_STATE + 1;
685 u8 prev_line_status;
683 u16 idv, idp; 686 u16 idv, idp;
684 687
685 idv = le16_to_cpu(port->serial->dev->descriptor.idVendor); 688 idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
@@ -701,11 +704,20 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
701 704
702 /* Save off the uart status for others to look at */ 705 /* Save off the uart status for others to look at */
703 spin_lock_irqsave(&priv->lock, flags); 706 spin_lock_irqsave(&priv->lock, flags);
707 prev_line_status = priv->line_status;
704 priv->line_status = data[status_idx]; 708 priv->line_status = data[status_idx];
705 spin_unlock_irqrestore(&priv->lock, flags); 709 spin_unlock_irqrestore(&priv->lock, flags);
706 if (priv->line_status & UART_BREAK_ERROR) 710 if (priv->line_status & UART_BREAK_ERROR)
707 usb_serial_handle_break(port); 711 usb_serial_handle_break(port);
708 wake_up_interruptible(&priv->delta_msr_wait); 712 wake_up_interruptible(&priv->delta_msr_wait);
713
714 tty = tty_port_tty_get(&port->port);
715 if (!tty)
716 return;
717 if ((priv->line_status ^ prev_line_status) & UART_DCD)
718 usb_serial_handle_dcd_change(port, tty,
719 priv->line_status & UART_DCD);
720 tty_kref_put(tty);
709} 721}
710 722
711static void pl2303_read_int_callback(struct urb *urb) 723static void pl2303_read_int_callback(struct urb *urb)
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 43eb9bdad422..1b025f75dafd 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -21,6 +21,7 @@
21#define PL2303_PRODUCT_ID_MMX 0x0612 21#define PL2303_PRODUCT_ID_MMX 0x0612
22#define PL2303_PRODUCT_ID_GPRS 0x0609 22#define PL2303_PRODUCT_ID_GPRS 0x0609
23#define PL2303_PRODUCT_ID_HCR331 0x331a 23#define PL2303_PRODUCT_ID_HCR331 0x331a
24#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
24 25
25#define ATEN_VENDOR_ID 0x0557 26#define ATEN_VENDOR_ID 0x0557
26#define ATEN_VENDOR_ID2 0x0547 27#define ATEN_VENDOR_ID2 0x0547
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 214a3e504292..30b73e68a904 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -36,6 +36,7 @@
36#define UTSTARCOM_PRODUCT_UM175_V1 0x3712 36#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
37#define UTSTARCOM_PRODUCT_UM175_V2 0x3714 37#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
38#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715 38#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
39#define PANTECH_PRODUCT_UML290_VZW 0x3718
39 40
40/* CMOTECH devices */ 41/* CMOTECH devices */
41#define CMOTECH_VENDOR_ID 0x16d8 42#define CMOTECH_VENDOR_ID 0x16d8
@@ -66,6 +67,7 @@ static struct usb_device_id id_table[] = {
66 { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, 67 { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
67 { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, 68 { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
68 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) }, 69 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
70 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
69 { }, 71 { },
70}; 72};
71MODULE_DEVICE_TABLE(usb, id_table); 73MODULE_DEVICE_TABLE(usb, id_table);
@@ -84,6 +86,7 @@ static struct usb_serial_driver qcaux_device = {
84 .name = "qcaux", 86 .name = "qcaux",
85 }, 87 },
86 .id_table = id_table, 88 .id_table = id_table,
89 .usb_driver = &qcaux_driver,
87 .num_ports = 1, 90 .num_ports = 1,
88}; 91};
89 92
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index cb8195cabfde..74cd4ccdb3fc 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -42,6 +42,7 @@ static struct usb_serial_driver siemens_usb_mpi_device = {
42 .name = "siemens_mpi", 42 .name = "siemens_mpi",
43 }, 43 },
44 .id_table = id_table, 44 .id_table = id_table,
45 .usb_driver = &siemens_usb_mpi_driver,
45 .num_ports = 1, 46 .num_ports = 1,
46}; 47};
47 48
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 765aa983bf58..cbfb70bffdd0 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -133,7 +133,7 @@ struct spcp8x5_usb_ctrl_arg {
133 133
134/* how come ??? */ 134/* how come ??? */
135#define UART_STATE 0x08 135#define UART_STATE 0x08
136#define UART_STATE_TRANSIENT_MASK 0x74 136#define UART_STATE_TRANSIENT_MASK 0x75
137#define UART_DCD 0x01 137#define UART_DCD 0x01
138#define UART_DSR 0x02 138#define UART_DSR 0x02
139#define UART_BREAK_ERROR 0x04 139#define UART_BREAK_ERROR 0x04
@@ -525,6 +525,10 @@ static void spcp8x5_process_read_urb(struct urb *urb)
525 /* overrun is special, not associated with a char */ 525 /* overrun is special, not associated with a char */
526 if (status & UART_OVERRUN_ERROR) 526 if (status & UART_OVERRUN_ERROR)
527 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 527 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
528
529 if (status & UART_DCD)
530 usb_serial_handle_dcd_change(port, tty,
531 priv->line_status & MSR_STATUS_LINE_DCD);
528 } 532 }
529 533
530 tty_insert_flip_string_fixed_flag(tty, data, tty_flag, 534 tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
@@ -645,6 +649,7 @@ static struct usb_serial_driver spcp8x5_device = {
645 .name = "SPCP8x5", 649 .name = "SPCP8x5",
646 }, 650 },
647 .id_table = id_table, 651 .id_table = id_table,
652 .usb_driver = &spcp8x5_driver,
648 .num_ports = 1, 653 .num_ports = 1,
649 .open = spcp8x5_open, 654 .open = spcp8x5_open,
650 .dtr_rts = spcp8x5_dtr_rts, 655 .dtr_rts = spcp8x5_dtr_rts,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index b2902f307b47..a910004f4079 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -369,9 +369,9 @@ failed_1port:
369 369
370static void __exit ti_exit(void) 370static void __exit ti_exit(void)
371{ 371{
372 usb_deregister(&ti_usb_driver);
372 usb_serial_deregister(&ti_1port_device); 373 usb_serial_deregister(&ti_1port_device);
373 usb_serial_deregister(&ti_2port_device); 374 usb_serial_deregister(&ti_2port_device);
374 usb_deregister(&ti_usb_driver);
375} 375}
376 376
377 377
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6954de50c0ff..546a52179bec 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1344,11 +1344,15 @@ int usb_serial_register(struct usb_serial_driver *driver)
1344 return -ENODEV; 1344 return -ENODEV;
1345 1345
1346 fixup_generic(driver); 1346 fixup_generic(driver);
1347 if (driver->usb_driver)
1348 driver->usb_driver->supports_autosuspend = 1;
1349 1347
1350 if (!driver->description) 1348 if (!driver->description)
1351 driver->description = driver->driver.name; 1349 driver->description = driver->driver.name;
1350 if (!driver->usb_driver) {
1351 WARN(1, "Serial driver %s has no usb_driver\n",
1352 driver->description);
1353 return -EINVAL;
1354 }
1355 driver->usb_driver->supports_autosuspend = 1;
1352 1356
1353 /* Add this device to our list of devices */ 1357 /* Add this device to our list of devices */
1354 mutex_lock(&table_lock); 1358 mutex_lock(&table_lock);
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index f2ed6a31be77..95a82148ee81 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -75,6 +75,7 @@ static struct usb_serial_driver debug_device = {
75 .name = "debug", 75 .name = "debug",
76 }, 76 },
77 .id_table = id_table, 77 .id_table = id_table,
78 .usb_driver = &debug_driver,
78 .num_ports = 1, 79 .num_ports = 1,
79 .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE, 80 .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
80 .break_ctl = usb_debug_break_ctl, 81 .break_ctl = usb_debug_break_ctl,
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index c854fdebe0ae..2c8553026222 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,4 +31,9 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
31 "Cypress ISD-300LP", 31 "Cypress ISD-300LP",
32 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), 32 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
33 33
34UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
35 "Super Top",
36 "USB 2.0 SATA BRIDGE",
37 USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
38
34#endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */ 39#endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fcc1e32ce256..c1602b8c5594 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1044,6 +1044,15 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110,
1044 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1044 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1045 US_FL_BULK32), 1045 US_FL_BULK32),
1046 1046
1047/* Reported by <ttkspam@free.fr>
1048 * The device reports a vendor-specific device class, requiring an
1049 * explicit vendor/product match.
1050 */
1051UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002,
1052 "MagicPixel",
1053 "FW_Omega2",
1054 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 0),
1055
1047/* Andrew Lunn <andrew@lunn.ch> 1056/* Andrew Lunn <andrew@lunn.ch>
1048 * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL 1057 * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
1049 * on LUN 4. 1058 * on LUN 4.
@@ -1388,6 +1397,13 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100,
1388 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1397 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1389 US_FL_IGNORE_RESIDUE ), 1398 US_FL_IGNORE_RESIDUE ),
1390 1399
1400/* Submitted by Nick Holloway */
1401UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
1402 "VTech",
1403 "Kidizoom",
1404 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1405 US_FL_FIX_CAPACITY ),
1406
1391/* Reported by Michael Stattmann <michael@stattmann.com> */ 1407/* Reported by Michael Stattmann <michael@stattmann.com> */
1392UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, 1408UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1393 "Sony Ericsson", 1409 "Sony Ericsson",
@@ -1872,6 +1888,22 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
1872 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1888 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1873 US_FL_NO_READ_DISC_INFO ), 1889 US_FL_NO_READ_DISC_INFO ),
1874 1890
1891/* Patch by Richard Schütz <r.schtz@t-online.de>
1892 * This external hard drive enclosure uses a JMicron chip which
1893 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
1894UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
1895 "TrekStor GmbH & Co. KG",
1896 "DataStation maxi g.u",
1897 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1898 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
1899
1900/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
1901UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
1902 "Coby Electronics",
1903 "MP3 Player",
1904 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1905 US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
1906
1875UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, 1907UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
1876 "ST", 1908 "ST",
1877 "2A", 1909 "2A",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9b3ca103135f..f616cefc95ba 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -128,8 +128,7 @@ static void handle_tx(struct vhost_net *net)
128 size_t hdr_size; 128 size_t hdr_size;
129 struct socket *sock; 129 struct socket *sock;
130 130
131 /* TODO: check that we are running from vhost_worker? 131 /* TODO: check that we are running from vhost_worker? */
132 * Not sure it's worth it, it's straight-forward enough. */
133 sock = rcu_dereference_check(vq->private_data, 1); 132 sock = rcu_dereference_check(vq->private_data, 1);
134 if (!sock) 133 if (!sock)
135 return; 134 return;
@@ -306,7 +305,8 @@ static void handle_rx_big(struct vhost_net *net)
306 size_t len, total_len = 0; 305 size_t len, total_len = 0;
307 int err; 306 int err;
308 size_t hdr_size; 307 size_t hdr_size;
309 struct socket *sock = rcu_dereference(vq->private_data); 308 /* TODO: check that we are running from vhost_worker? */
309 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
310 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) 310 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
311 return; 311 return;
312 312
@@ -415,7 +415,8 @@ static void handle_rx_mergeable(struct vhost_net *net)
415 int err, headcount; 415 int err, headcount;
416 size_t vhost_hlen, sock_hlen; 416 size_t vhost_hlen, sock_hlen;
417 size_t vhost_len, sock_len; 417 size_t vhost_len, sock_len;
418 struct socket *sock = rcu_dereference(vq->private_data); 418 /* TODO: check that we are running from vhost_worker? */
419 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
419 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) 420 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
420 return; 421 return;
421 422
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 38244f59cdd9..ade0568c07a4 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -97,22 +97,26 @@ void vhost_poll_stop(struct vhost_poll *poll)
97 remove_wait_queue(poll->wqh, &poll->wait); 97 remove_wait_queue(poll->wqh, &poll->wait);
98} 98}
99 99
100static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
101 unsigned seq)
102{
103 int left;
104 spin_lock_irq(&dev->work_lock);
105 left = seq - work->done_seq;
106 spin_unlock_irq(&dev->work_lock);
107 return left <= 0;
108}
109
100static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) 110static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
101{ 111{
102 unsigned seq; 112 unsigned seq;
103 int left;
104 int flushing; 113 int flushing;
105 114
106 spin_lock_irq(&dev->work_lock); 115 spin_lock_irq(&dev->work_lock);
107 seq = work->queue_seq; 116 seq = work->queue_seq;
108 work->flushing++; 117 work->flushing++;
109 spin_unlock_irq(&dev->work_lock); 118 spin_unlock_irq(&dev->work_lock);
110 wait_event(work->done, ({ 119 wait_event(work->done, vhost_work_seq_done(dev, work, seq));
111 spin_lock_irq(&dev->work_lock);
112 left = seq - work->done_seq <= 0;
113 spin_unlock_irq(&dev->work_lock);
114 left;
115 }));
116 spin_lock_irq(&dev->work_lock); 120 spin_lock_irq(&dev->work_lock);
117 flushing = --work->flushing; 121 flushing = --work->flushing;
118 spin_unlock_irq(&dev->work_lock); 122 spin_unlock_irq(&dev->work_lock);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2af44b7b1f3f..b3363ae38518 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,9 +173,9 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
173{ 173{
174 unsigned acked_features; 174 unsigned acked_features;
175 175
176 acked_features = 176 /* TODO: check that we are running from vhost_worker or dev mutex is
177 rcu_dereference_index_check(dev->acked_features, 177 * held? */
178 lockdep_is_held(&dev->mutex)); 178 acked_features = rcu_dereference_index_check(dev->acked_features, 1);
179 return acked_features & (1 << bit); 179 return acked_features & (1 << bit);
180} 180}
181 181
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d916ac04abab..6bafb51bb437 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1227,7 +1227,7 @@ config FB_CARILLO_RANCH
1227 1227
1228config FB_INTEL 1228config FB_INTEL
1229 tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)" 1229 tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
1230 depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EMBEDDED 1230 depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EXPERT
1231 select FB_MODE_HELPERS 1231 select FB_MODE_HELPERS
1232 select FB_CFB_FILLRECT 1232 select FB_CFB_FILLRECT
1233 select FB_CFB_COPYAREA 1233 select FB_CFB_COPYAREA
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index d583bea608fd..391ac939f011 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -23,7 +23,7 @@
23#include <linux/svga.h> 23#include <linux/svga.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */ 26#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
27#include <video/vga.h> 27#include <video/vga.h>
28 28
29#ifdef CONFIG_MTRR 29#ifdef CONFIG_MTRR
@@ -1091,12 +1091,12 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
1091 1091
1092 dev_info(info->device, "suspend\n"); 1092 dev_info(info->device, "suspend\n");
1093 1093
1094 acquire_console_sem(); 1094 console_lock();
1095 mutex_lock(&(par->open_lock)); 1095 mutex_lock(&(par->open_lock));
1096 1096
1097 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { 1097 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
1098 mutex_unlock(&(par->open_lock)); 1098 mutex_unlock(&(par->open_lock));
1099 release_console_sem(); 1099 console_unlock();
1100 return 0; 1100 return 0;
1101 } 1101 }
1102 1102
@@ -1107,7 +1107,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
1107 pci_set_power_state(dev, pci_choose_state(dev, state)); 1107 pci_set_power_state(dev, pci_choose_state(dev, state));
1108 1108
1109 mutex_unlock(&(par->open_lock)); 1109 mutex_unlock(&(par->open_lock));
1110 release_console_sem(); 1110 console_unlock();
1111 1111
1112 return 0; 1112 return 0;
1113} 1113}
@@ -1122,7 +1122,7 @@ static int ark_pci_resume (struct pci_dev* dev)
1122 1122
1123 dev_info(info->device, "resume\n"); 1123 dev_info(info->device, "resume\n");
1124 1124
1125 acquire_console_sem(); 1125 console_lock();
1126 mutex_lock(&(par->open_lock)); 1126 mutex_lock(&(par->open_lock));
1127 1127
1128 if (par->ref_count == 0) 1128 if (par->ref_count == 0)
@@ -1141,7 +1141,7 @@ static int ark_pci_resume (struct pci_dev* dev)
1141 1141
1142fail: 1142fail:
1143 mutex_unlock(&(par->open_lock)); 1143 mutex_unlock(&(par->open_lock));
1144 release_console_sem(); 1144 console_unlock();
1145 return 0; 1145 return 0;
1146} 1146}
1147#else 1147#else
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index dd9de2e80580..4cb6a576c567 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1860,11 +1860,11 @@ static void aty128_early_resume(void *data)
1860{ 1860{
1861 struct aty128fb_par *par = data; 1861 struct aty128fb_par *par = data;
1862 1862
1863 if (try_acquire_console_sem()) 1863 if (!console_trylock())
1864 return; 1864 return;
1865 pci_restore_state(par->pdev); 1865 pci_restore_state(par->pdev);
1866 aty128_do_resume(par->pdev); 1866 aty128_do_resume(par->pdev);
1867 release_console_sem(); 1867 console_unlock();
1868} 1868}
1869#endif /* CONFIG_PPC_PMAC */ 1869#endif /* CONFIG_PPC_PMAC */
1870 1870
@@ -2438,7 +2438,7 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2438 2438
2439 printk(KERN_DEBUG "aty128fb: suspending...\n"); 2439 printk(KERN_DEBUG "aty128fb: suspending...\n");
2440 2440
2441 acquire_console_sem(); 2441 console_lock();
2442 2442
2443 fb_set_suspend(info, 1); 2443 fb_set_suspend(info, 1);
2444 2444
@@ -2470,7 +2470,7 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2470 if (state.event != PM_EVENT_ON) 2470 if (state.event != PM_EVENT_ON)
2471 aty128_set_suspend(par, 1); 2471 aty128_set_suspend(par, 1);
2472 2472
2473 release_console_sem(); 2473 console_unlock();
2474 2474
2475 pdev->dev.power.power_state = state; 2475 pdev->dev.power.power_state = state;
2476 2476
@@ -2527,9 +2527,9 @@ static int aty128_pci_resume(struct pci_dev *pdev)
2527{ 2527{
2528 int rc; 2528 int rc;
2529 2529
2530 acquire_console_sem(); 2530 console_lock();
2531 rc = aty128_do_resume(pdev); 2531 rc = aty128_do_resume(pdev);
2532 release_console_sem(); 2532 console_unlock();
2533 2533
2534 return rc; 2534 return rc;
2535} 2535}
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 767ab4fb1a05..94e293fce1d2 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2069,7 +2069,7 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2069 if (state.event == pdev->dev.power.power_state.event) 2069 if (state.event == pdev->dev.power.power_state.event)
2070 return 0; 2070 return 0;
2071 2071
2072 acquire_console_sem(); 2072 console_lock();
2073 2073
2074 fb_set_suspend(info, 1); 2074 fb_set_suspend(info, 1);
2075 2075
@@ -2097,14 +2097,14 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2097 par->lock_blank = 0; 2097 par->lock_blank = 0;
2098 atyfb_blank(FB_BLANK_UNBLANK, info); 2098 atyfb_blank(FB_BLANK_UNBLANK, info);
2099 fb_set_suspend(info, 0); 2099 fb_set_suspend(info, 0);
2100 release_console_sem(); 2100 console_unlock();
2101 return -EIO; 2101 return -EIO;
2102 } 2102 }
2103#else 2103#else
2104 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2104 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2105#endif 2105#endif
2106 2106
2107 release_console_sem(); 2107 console_unlock();
2108 2108
2109 pdev->dev.power.power_state = state; 2109 pdev->dev.power.power_state = state;
2110 2110
@@ -2133,7 +2133,7 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
2133 if (pdev->dev.power.power_state.event == PM_EVENT_ON) 2133 if (pdev->dev.power.power_state.event == PM_EVENT_ON)
2134 return 0; 2134 return 0;
2135 2135
2136 acquire_console_sem(); 2136 console_lock();
2137 2137
2138 /* 2138 /*
2139 * PCI state will have been restored by the core, so 2139 * PCI state will have been restored by the core, so
@@ -2161,7 +2161,7 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
2161 par->lock_blank = 0; 2161 par->lock_blank = 0;
2162 atyfb_blank(FB_BLANK_UNBLANK, info); 2162 atyfb_blank(FB_BLANK_UNBLANK, info);
2163 2163
2164 release_console_sem(); 2164 console_unlock();
2165 2165
2166 pdev->dev.power.power_state = PMSG_ON; 2166 pdev->dev.power.power_state = PMSG_ON;
2167 2167
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index c4e17642d9c5..92bda5848516 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2626,7 +2626,7 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
2626 goto done; 2626 goto done;
2627 } 2627 }
2628 2628
2629 acquire_console_sem(); 2629 console_lock();
2630 2630
2631 fb_set_suspend(info, 1); 2631 fb_set_suspend(info, 1);
2632 2632
@@ -2690,7 +2690,7 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
2690 if (rinfo->pm_mode & radeon_pm_d2) 2690 if (rinfo->pm_mode & radeon_pm_d2)
2691 radeon_set_suspend(rinfo, 1); 2691 radeon_set_suspend(rinfo, 1);
2692 2692
2693 release_console_sem(); 2693 console_unlock();
2694 2694
2695 done: 2695 done:
2696 pdev->dev.power.power_state = mesg; 2696 pdev->dev.power.power_state = mesg;
@@ -2715,10 +2715,10 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
2715 return 0; 2715 return 0;
2716 2716
2717 if (rinfo->no_schedule) { 2717 if (rinfo->no_schedule) {
2718 if (try_acquire_console_sem()) 2718 if (!console_trylock())
2719 return 0; 2719 return 0;
2720 } else 2720 } else
2721 acquire_console_sem(); 2721 console_lock();
2722 2722
2723 printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n", 2723 printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n",
2724 pci_name(pdev), pdev->dev.power.power_state.event); 2724 pci_name(pdev), pdev->dev.power.power_state.event);
@@ -2783,7 +2783,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
2783 pdev->dev.power.power_state = PMSG_ON; 2783 pdev->dev.power.power_state = PMSG_ON;
2784 2784
2785 bail: 2785 bail:
2786 release_console_sem(); 2786 console_unlock();
2787 2787
2788 return rc; 2788 return rc;
2789} 2789}
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index c789c46e38af..b224396b86d5 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -21,7 +21,7 @@
21#define MAX_BRIGHTNESS (0xFF) 21#define MAX_BRIGHTNESS (0xFF)
22#define MIN_BRIGHTNESS (0) 22#define MIN_BRIGHTNESS (0)
23 23
24#define CURRENT_MASK (0x1F << 1) 24#define CURRENT_BITMASK (0x1F << 1)
25 25
26struct pm860x_backlight_data { 26struct pm860x_backlight_data {
27 struct pm860x_chip *chip; 27 struct pm860x_chip *chip;
@@ -85,7 +85,7 @@ static int pm860x_backlight_set(struct backlight_device *bl, int brightness)
85 if ((data->current_brightness == 0) && brightness) { 85 if ((data->current_brightness == 0) && brightness) {
86 if (data->iset) { 86 if (data->iset) {
87 ret = pm860x_set_bits(data->i2c, wled_idc(data->port), 87 ret = pm860x_set_bits(data->i2c, wled_idc(data->port),
88 CURRENT_MASK, data->iset); 88 CURRENT_BITMASK, data->iset);
89 if (ret < 0) 89 if (ret < 0)
90 goto out; 90 goto out;
91 } 91 }
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 18c507874ff1..47c21fb2c82f 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -696,6 +696,7 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
696{ 696{
697 struct backlight_properties props; 697 struct backlight_properties props;
698 dma_addr_t dma_handle; 698 dma_addr_t dma_handle;
699 int ret;
699 700
700 if (request_dma(CH_PPI, KBUILD_MODNAME)) { 701 if (request_dma(CH_PPI, KBUILD_MODNAME)) {
701 pr_err("couldn't request PPI DMA\n"); 702 pr_err("couldn't request PPI DMA\n");
@@ -704,17 +705,16 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
704 705
705 if (request_ports()) { 706 if (request_ports()) {
706 pr_err("couldn't request gpio port\n"); 707 pr_err("couldn't request gpio port\n");
707 free_dma(CH_PPI); 708 ret = -EFAULT;
708 return -EFAULT; 709 goto out_ports;
709 } 710 }
710 711
711 fb_buffer = dma_alloc_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, 712 fb_buffer = dma_alloc_coherent(NULL, TOTAL_VIDEO_MEM_SIZE,
712 &dma_handle, GFP_KERNEL); 713 &dma_handle, GFP_KERNEL);
713 if (fb_buffer == NULL) { 714 if (fb_buffer == NULL) {
714 pr_err("couldn't allocate dma buffer\n"); 715 pr_err("couldn't allocate dma buffer\n");
715 free_dma(CH_PPI); 716 ret = -ENOMEM;
716 free_ports(); 717 goto out_dma_coherent;
717 return -ENOMEM;
718 } 718 }
719 719
720 if (L1_DATA_A_LENGTH) 720 if (L1_DATA_A_LENGTH)
@@ -725,10 +725,8 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
725 725
726 if (dma_desc_table == NULL) { 726 if (dma_desc_table == NULL) {
727 pr_err("couldn't allocate dma descriptor\n"); 727 pr_err("couldn't allocate dma descriptor\n");
728 free_dma(CH_PPI); 728 ret = -ENOMEM;
729 free_ports(); 729 goto out_table;
730 dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
731 return -ENOMEM;
732 } 730 }
733 731
734 bfin_lq035_fb.screen_base = (void *)fb_buffer; 732 bfin_lq035_fb.screen_base = (void *)fb_buffer;
@@ -771,31 +769,21 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
771 bfin_lq035_fb.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL); 769 bfin_lq035_fb.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
772 if (bfin_lq035_fb.pseudo_palette == NULL) { 770 if (bfin_lq035_fb.pseudo_palette == NULL) {
773 pr_err("failed to allocate pseudo_palette\n"); 771 pr_err("failed to allocate pseudo_palette\n");
774 free_dma(CH_PPI); 772 ret = -ENOMEM;
775 free_ports(); 773 goto out_palette;
776 dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
777 return -ENOMEM;
778 } 774 }
779 775
780 if (fb_alloc_cmap(&bfin_lq035_fb.cmap, NBR_PALETTE, 0) < 0) { 776 if (fb_alloc_cmap(&bfin_lq035_fb.cmap, NBR_PALETTE, 0) < 0) {
781 pr_err("failed to allocate colormap (%d entries)\n", 777 pr_err("failed to allocate colormap (%d entries)\n",
782 NBR_PALETTE); 778 NBR_PALETTE);
783 free_dma(CH_PPI); 779 ret = -EFAULT;
784 free_ports(); 780 goto out_cmap;
785 dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
786 kfree(bfin_lq035_fb.pseudo_palette);
787 return -EFAULT;
788 } 781 }
789 782
790 if (register_framebuffer(&bfin_lq035_fb) < 0) { 783 if (register_framebuffer(&bfin_lq035_fb) < 0) {
791 pr_err("unable to register framebuffer\n"); 784 pr_err("unable to register framebuffer\n");
792 free_dma(CH_PPI); 785 ret = -EINVAL;
793 free_ports(); 786 goto out_reg;
794 dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
795 fb_buffer = NULL;
796 kfree(bfin_lq035_fb.pseudo_palette);
797 fb_dealloc_cmap(&bfin_lq035_fb.cmap);
798 return -EINVAL;
799 } 787 }
800 788
801 i2c_add_driver(&ad5280_driver); 789 i2c_add_driver(&ad5280_driver);
@@ -807,11 +795,31 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
807 795
808 lcd_dev = lcd_device_register(KBUILD_MODNAME, &pdev->dev, NULL, 796 lcd_dev = lcd_device_register(KBUILD_MODNAME, &pdev->dev, NULL,
809 &bfin_lcd_ops); 797 &bfin_lcd_ops);
798 if (IS_ERR(lcd_dev)) {
799 pr_err("unable to register lcd\n");
800 ret = PTR_ERR(lcd_dev);
801 goto out_lcd;
802 }
810 lcd_dev->props.max_contrast = 255, 803 lcd_dev->props.max_contrast = 255,
811 804
812 pr_info("initialized"); 805 pr_info("initialized");
813 806
814 return 0; 807 return 0;
808out_lcd:
809 unregister_framebuffer(&bfin_lq035_fb);
810out_reg:
811 fb_dealloc_cmap(&bfin_lq035_fb.cmap);
812out_cmap:
813 kfree(bfin_lq035_fb.pseudo_palette);
814out_palette:
815out_table:
816 dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
817 fb_buffer = NULL;
818out_dma_coherent:
819 free_ports();
820out_ports:
821 free_dma(CH_PPI);
822 return ret;
815} 823}
816 824
817static int __devexit bfin_lq035_remove(struct platform_device *pdev) 825static int __devexit bfin_lq035_remove(struct platform_device *pdev)
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index d637e1f53172..cff742abdc5d 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -460,10 +460,10 @@ static int chipsfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
460 if (!(state.event & PM_EVENT_SLEEP)) 460 if (!(state.event & PM_EVENT_SLEEP))
461 goto done; 461 goto done;
462 462
463 acquire_console_sem(); 463 console_lock();
464 chipsfb_blank(1, p); 464 chipsfb_blank(1, p);
465 fb_set_suspend(p, 1); 465 fb_set_suspend(p, 1);
466 release_console_sem(); 466 console_unlock();
467 done: 467 done:
468 pdev->dev.power.power_state = state; 468 pdev->dev.power.power_state = state;
469 return 0; 469 return 0;
@@ -473,10 +473,10 @@ static int chipsfb_pci_resume(struct pci_dev *pdev)
473{ 473{
474 struct fb_info *p = pci_get_drvdata(pdev); 474 struct fb_info *p = pci_get_drvdata(pdev);
475 475
476 acquire_console_sem(); 476 console_lock();
477 fb_set_suspend(p, 0); 477 fb_set_suspend(p, 0);
478 chipsfb_blank(0, p); 478 chipsfb_blank(0, p);
479 release_console_sem(); 479 console_unlock();
480 480
481 pdev->dev.power.power_state = PMSG_ON; 481 pdev->dev.power.power_state = PMSG_ON;
482 return 0; 482 return 0;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 5a35f22372b9..2209e354f531 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -5,7 +5,7 @@
5menu "Console display driver support" 5menu "Console display driver support"
6 6
7config VGA_CONSOLE 7config VGA_CONSOLE
8 bool "VGA text console" if EMBEDDED || !X86 8 bool "VGA text console" if EXPERT || !X86
9 depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) 9 depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
10 default y 10 default y
11 help 11 help
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 7ccc967831f0..9c092b8d64e6 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -375,14 +375,14 @@ static void fb_flashcursor(struct work_struct *work)
375 int c; 375 int c;
376 int mode; 376 int mode;
377 377
378 acquire_console_sem(); 378 console_lock();
379 if (ops && ops->currcon != -1) 379 if (ops && ops->currcon != -1)
380 vc = vc_cons[ops->currcon].d; 380 vc = vc_cons[ops->currcon].d;
381 381
382 if (!vc || !CON_IS_VISIBLE(vc) || 382 if (!vc || !CON_IS_VISIBLE(vc) ||
383 registered_fb[con2fb_map[vc->vc_num]] != info || 383 registered_fb[con2fb_map[vc->vc_num]] != info ||
384 vc->vc_deccm != 1) { 384 vc->vc_deccm != 1) {
385 release_console_sem(); 385 console_unlock();
386 return; 386 return;
387 } 387 }
388 388
@@ -392,7 +392,7 @@ static void fb_flashcursor(struct work_struct *work)
392 CM_ERASE : CM_DRAW; 392 CM_ERASE : CM_DRAW;
393 ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), 393 ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
394 get_color(vc, info, c, 0)); 394 get_color(vc, info, c, 0));
395 release_console_sem(); 395 console_unlock();
396} 396}
397 397
398static void cursor_timer_handler(unsigned long dev_addr) 398static void cursor_timer_handler(unsigned long dev_addr)
@@ -836,7 +836,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
836 836
837 found = search_fb_in_map(newidx); 837 found = search_fb_in_map(newidx);
838 838
839 acquire_console_sem(); 839 console_lock();
840 con2fb_map[unit] = newidx; 840 con2fb_map[unit] = newidx;
841 if (!err && !found) 841 if (!err && !found)
842 err = con2fb_acquire_newinfo(vc, info, unit, oldidx); 842 err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -863,7 +863,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
863 if (!search_fb_in_map(info_idx)) 863 if (!search_fb_in_map(info_idx))
864 info_idx = newidx; 864 info_idx = newidx;
865 865
866 release_console_sem(); 866 console_unlock();
867 return err; 867 return err;
868} 868}
869 869
@@ -3321,7 +3321,7 @@ static ssize_t store_rotate(struct device *device,
3321 if (fbcon_has_exited) 3321 if (fbcon_has_exited)
3322 return count; 3322 return count;
3323 3323
3324 acquire_console_sem(); 3324 console_lock();
3325 idx = con2fb_map[fg_console]; 3325 idx = con2fb_map[fg_console];
3326 3326
3327 if (idx == -1 || registered_fb[idx] == NULL) 3327 if (idx == -1 || registered_fb[idx] == NULL)
@@ -3331,7 +3331,7 @@ static ssize_t store_rotate(struct device *device,
3331 rotate = simple_strtoul(buf, last, 0); 3331 rotate = simple_strtoul(buf, last, 0);
3332 fbcon_rotate(info, rotate); 3332 fbcon_rotate(info, rotate);
3333err: 3333err:
3334 release_console_sem(); 3334 console_unlock();
3335 return count; 3335 return count;
3336} 3336}
3337 3337
@@ -3346,7 +3346,7 @@ static ssize_t store_rotate_all(struct device *device,
3346 if (fbcon_has_exited) 3346 if (fbcon_has_exited)
3347 return count; 3347 return count;
3348 3348
3349 acquire_console_sem(); 3349 console_lock();
3350 idx = con2fb_map[fg_console]; 3350 idx = con2fb_map[fg_console];
3351 3351
3352 if (idx == -1 || registered_fb[idx] == NULL) 3352 if (idx == -1 || registered_fb[idx] == NULL)
@@ -3356,7 +3356,7 @@ static ssize_t store_rotate_all(struct device *device,
3356 rotate = simple_strtoul(buf, last, 0); 3356 rotate = simple_strtoul(buf, last, 0);
3357 fbcon_rotate_all(info, rotate); 3357 fbcon_rotate_all(info, rotate);
3358err: 3358err:
3359 release_console_sem(); 3359 console_unlock();
3360 return count; 3360 return count;
3361} 3361}
3362 3362
@@ -3369,7 +3369,7 @@ static ssize_t show_rotate(struct device *device,
3369 if (fbcon_has_exited) 3369 if (fbcon_has_exited)
3370 return 0; 3370 return 0;
3371 3371
3372 acquire_console_sem(); 3372 console_lock();
3373 idx = con2fb_map[fg_console]; 3373 idx = con2fb_map[fg_console];
3374 3374
3375 if (idx == -1 || registered_fb[idx] == NULL) 3375 if (idx == -1 || registered_fb[idx] == NULL)
@@ -3378,7 +3378,7 @@ static ssize_t show_rotate(struct device *device,
3378 info = registered_fb[idx]; 3378 info = registered_fb[idx];
3379 rotate = fbcon_get_rotate(info); 3379 rotate = fbcon_get_rotate(info);
3380err: 3380err:
3381 release_console_sem(); 3381 console_unlock();
3382 return snprintf(buf, PAGE_SIZE, "%d\n", rotate); 3382 return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
3383} 3383}
3384 3384
@@ -3392,7 +3392,7 @@ static ssize_t show_cursor_blink(struct device *device,
3392 if (fbcon_has_exited) 3392 if (fbcon_has_exited)
3393 return 0; 3393 return 0;
3394 3394
3395 acquire_console_sem(); 3395 console_lock();
3396 idx = con2fb_map[fg_console]; 3396 idx = con2fb_map[fg_console];
3397 3397
3398 if (idx == -1 || registered_fb[idx] == NULL) 3398 if (idx == -1 || registered_fb[idx] == NULL)
@@ -3406,7 +3406,7 @@ static ssize_t show_cursor_blink(struct device *device,
3406 3406
3407 blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0; 3407 blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
3408err: 3408err:
3409 release_console_sem(); 3409 console_unlock();
3410 return snprintf(buf, PAGE_SIZE, "%d\n", blink); 3410 return snprintf(buf, PAGE_SIZE, "%d\n", blink);
3411} 3411}
3412 3412
@@ -3421,7 +3421,7 @@ static ssize_t store_cursor_blink(struct device *device,
3421 if (fbcon_has_exited) 3421 if (fbcon_has_exited)
3422 return count; 3422 return count;
3423 3423
3424 acquire_console_sem(); 3424 console_lock();
3425 idx = con2fb_map[fg_console]; 3425 idx = con2fb_map[fg_console];
3426 3426
3427 if (idx == -1 || registered_fb[idx] == NULL) 3427 if (idx == -1 || registered_fb[idx] == NULL)
@@ -3443,7 +3443,7 @@ static ssize_t store_cursor_blink(struct device *device,
3443 } 3443 }
3444 3444
3445err: 3445err:
3446 release_console_sem(); 3446 console_unlock();
3447 return count; 3447 return count;
3448} 3448}
3449 3449
@@ -3482,7 +3482,7 @@ static void fbcon_start(void)
3482 if (num_registered_fb) { 3482 if (num_registered_fb) {
3483 int i; 3483 int i;
3484 3484
3485 acquire_console_sem(); 3485 console_lock();
3486 3486
3487 for (i = 0; i < FB_MAX; i++) { 3487 for (i = 0; i < FB_MAX; i++) {
3488 if (registered_fb[i] != NULL) { 3488 if (registered_fb[i] != NULL) {
@@ -3491,7 +3491,7 @@ static void fbcon_start(void)
3491 } 3491 }
3492 } 3492 }
3493 3493
3494 release_console_sem(); 3494 console_unlock();
3495 fbcon_takeover(0); 3495 fbcon_takeover(0);
3496 } 3496 }
3497} 3497}
@@ -3552,7 +3552,7 @@ static int __init fb_console_init(void)
3552{ 3552{
3553 int i; 3553 int i;
3554 3554
3555 acquire_console_sem(); 3555 console_lock();
3556 fb_register_client(&fbcon_event_notifier); 3556 fb_register_client(&fbcon_event_notifier);
3557 fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL, 3557 fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL,
3558 "fbcon"); 3558 "fbcon");
@@ -3568,7 +3568,7 @@ static int __init fb_console_init(void)
3568 for (i = 0; i < MAX_NR_CONSOLES; i++) 3568 for (i = 0; i < MAX_NR_CONSOLES; i++)
3569 con2fb_map[i] = -1; 3569 con2fb_map[i] = -1;
3570 3570
3571 release_console_sem(); 3571 console_unlock();
3572 fbcon_start(); 3572 fbcon_start();
3573 return 0; 3573 return 0;
3574} 3574}
@@ -3591,12 +3591,12 @@ static void __exit fbcon_deinit_device(void)
3591 3591
3592static void __exit fb_console_exit(void) 3592static void __exit fb_console_exit(void)
3593{ 3593{
3594 acquire_console_sem(); 3594 console_lock();
3595 fb_unregister_client(&fbcon_event_notifier); 3595 fb_unregister_client(&fbcon_event_notifier);
3596 fbcon_deinit_device(); 3596 fbcon_deinit_device();
3597 device_destroy(fb_class, MKDEV(0, 0)); 3597 device_destroy(fb_class, MKDEV(0, 0));
3598 fbcon_exit(); 3598 fbcon_exit();
3599 release_console_sem(); 3599 console_unlock();
3600 unregister_con_driver(&fb_con); 3600 unregister_con_driver(&fb_con);
3601} 3601}
3602 3602
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index c97491b8b39b..915fd74da7a2 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -202,11 +202,7 @@ static void vgacon_scrollback_init(int pitch)
202 } 202 }
203} 203}
204 204
205/* 205static void vgacon_scrollback_startup(void)
206 * Called only duing init so call of alloc_bootmen is ok.
207 * Marked __init_refok to silence modpost.
208 */
209static void __init_refok vgacon_scrollback_startup(void)
210{ 206{
211 vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT); 207 vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
212 vgacon_scrollback_init(vga_video_num_columns * 2); 208 vgacon_scrollback_init(vga_video_num_columns * 2);
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index c265aed09e04..8d61ef96eedd 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -1092,9 +1092,10 @@ static int __init fb_probe(struct platform_device *device)
1092 1092
1093irq_freq: 1093irq_freq:
1094#ifdef CONFIG_CPU_FREQ 1094#ifdef CONFIG_CPU_FREQ
1095 lcd_da8xx_cpufreq_deregister(par);
1096#endif
1095err_cpu_freq: 1097err_cpu_freq:
1096 unregister_framebuffer(da8xx_fb_info); 1098 unregister_framebuffer(da8xx_fb_info);
1097#endif
1098 1099
1099err_dealloc_cmap: 1100err_dealloc_cmap:
1100 fb_dealloc_cmap(&da8xx_fb_info->cmap); 1101 fb_dealloc_cmap(&da8xx_fb_info->cmap);
@@ -1130,14 +1131,14 @@ static int fb_suspend(struct platform_device *dev, pm_message_t state)
1130 struct fb_info *info = platform_get_drvdata(dev); 1131 struct fb_info *info = platform_get_drvdata(dev);
1131 struct da8xx_fb_par *par = info->par; 1132 struct da8xx_fb_par *par = info->par;
1132 1133
1133 acquire_console_sem(); 1134 console_lock();
1134 if (par->panel_power_ctrl) 1135 if (par->panel_power_ctrl)
1135 par->panel_power_ctrl(0); 1136 par->panel_power_ctrl(0);
1136 1137
1137 fb_set_suspend(info, 1); 1138 fb_set_suspend(info, 1);
1138 lcd_disable_raster(); 1139 lcd_disable_raster();
1139 clk_disable(par->lcdc_clk); 1140 clk_disable(par->lcdc_clk);
1140 release_console_sem(); 1141 console_unlock();
1141 1142
1142 return 0; 1143 return 0;
1143} 1144}
@@ -1146,14 +1147,14 @@ static int fb_resume(struct platform_device *dev)
1146 struct fb_info *info = platform_get_drvdata(dev); 1147 struct fb_info *info = platform_get_drvdata(dev);
1147 struct da8xx_fb_par *par = info->par; 1148 struct da8xx_fb_par *par = info->par;
1148 1149
1149 acquire_console_sem(); 1150 console_lock();
1150 if (par->panel_power_ctrl) 1151 if (par->panel_power_ctrl)
1151 par->panel_power_ctrl(1); 1152 par->panel_power_ctrl(1);
1152 1153
1153 clk_enable(par->lcdc_clk); 1154 clk_enable(par->lcdc_clk);
1154 lcd_enable_raster(); 1155 lcd_enable_raster();
1155 fb_set_suspend(info, 0); 1156 fb_set_suspend(info, 0);
1156 release_console_sem(); 1157 console_unlock();
1157 1158
1158 return 0; 1159 return 0;
1159} 1160}
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 0c99de0562ca..b358d045f130 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -483,7 +483,7 @@ static void ep93xxfb_dealloc_videomem(struct fb_info *info)
483 info->screen_base, info->fix.smem_start); 483 info->screen_base, info->fix.smem_start);
484} 484}
485 485
486static int __init ep93xxfb_probe(struct platform_device *pdev) 486static int __devinit ep93xxfb_probe(struct platform_device *pdev)
487{ 487{
488 struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data; 488 struct ep93xxfb_mach_info *mach_info = pdev->dev.platform_data;
489 struct fb_info *info; 489 struct fb_info *info;
@@ -598,7 +598,7 @@ failed:
598 return err; 598 return err;
599} 599}
600 600
601static int ep93xxfb_remove(struct platform_device *pdev) 601static int __devexit ep93xxfb_remove(struct platform_device *pdev)
602{ 602{
603 struct fb_info *info = platform_get_drvdata(pdev); 603 struct fb_info *info = platform_get_drvdata(pdev);
604 struct ep93xx_fbi *fbi = info->par; 604 struct ep93xx_fbi *fbi = info->par;
@@ -622,7 +622,7 @@ static int ep93xxfb_remove(struct platform_device *pdev)
622 622
623static struct platform_driver ep93xxfb_driver = { 623static struct platform_driver ep93xxfb_driver = {
624 .probe = ep93xxfb_probe, 624 .probe = ep93xxfb_probe,
625 .remove = ep93xxfb_remove, 625 .remove = __devexit_p(ep93xxfb_remove),
626 .driver = { 626 .driver = {
627 .name = "ep93xx-fb", 627 .name = "ep93xx-fb",
628 .owner = THIS_MODULE, 628 .owner = THIS_MODULE,
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 4ac1201ad6c2..e2bf95370e40 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1036,11 +1036,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1036 return -EFAULT; 1036 return -EFAULT;
1037 if (!lock_fb_info(info)) 1037 if (!lock_fb_info(info))
1038 return -ENODEV; 1038 return -ENODEV;
1039 acquire_console_sem(); 1039 console_lock();
1040 info->flags |= FBINFO_MISC_USEREVENT; 1040 info->flags |= FBINFO_MISC_USEREVENT;
1041 ret = fb_set_var(info, &var); 1041 ret = fb_set_var(info, &var);
1042 info->flags &= ~FBINFO_MISC_USEREVENT; 1042 info->flags &= ~FBINFO_MISC_USEREVENT;
1043 release_console_sem(); 1043 console_unlock();
1044 unlock_fb_info(info); 1044 unlock_fb_info(info);
1045 if (!ret && copy_to_user(argp, &var, sizeof(var))) 1045 if (!ret && copy_to_user(argp, &var, sizeof(var)))
1046 ret = -EFAULT; 1046 ret = -EFAULT;
@@ -1072,9 +1072,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1072 return -EFAULT; 1072 return -EFAULT;
1073 if (!lock_fb_info(info)) 1073 if (!lock_fb_info(info))
1074 return -ENODEV; 1074 return -ENODEV;
1075 acquire_console_sem(); 1075 console_lock();
1076 ret = fb_pan_display(info, &var); 1076 ret = fb_pan_display(info, &var);
1077 release_console_sem(); 1077 console_unlock();
1078 unlock_fb_info(info); 1078 unlock_fb_info(info);
1079 if (ret == 0 && copy_to_user(argp, &var, sizeof(var))) 1079 if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
1080 return -EFAULT; 1080 return -EFAULT;
@@ -1119,11 +1119,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1119 case FBIOBLANK: 1119 case FBIOBLANK:
1120 if (!lock_fb_info(info)) 1120 if (!lock_fb_info(info))
1121 return -ENODEV; 1121 return -ENODEV;
1122 acquire_console_sem(); 1122 console_lock();
1123 info->flags |= FBINFO_MISC_USEREVENT; 1123 info->flags |= FBINFO_MISC_USEREVENT;
1124 ret = fb_blank(info, arg); 1124 ret = fb_blank(info, arg);
1125 info->flags &= ~FBINFO_MISC_USEREVENT; 1125 info->flags &= ~FBINFO_MISC_USEREVENT;
1126 release_console_sem(); 1126 console_unlock();
1127 unlock_fb_info(info); 1127 unlock_fb_info(info);
1128 break; 1128 break;
1129 default: 1129 default:
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 0a08f1341227..f4a32779168b 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -90,11 +90,11 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
90 int err; 90 int err;
91 91
92 var->activate |= FB_ACTIVATE_FORCE; 92 var->activate |= FB_ACTIVATE_FORCE;
93 acquire_console_sem(); 93 console_lock();
94 fb_info->flags |= FBINFO_MISC_USEREVENT; 94 fb_info->flags |= FBINFO_MISC_USEREVENT;
95 err = fb_set_var(fb_info, var); 95 err = fb_set_var(fb_info, var);
96 fb_info->flags &= ~FBINFO_MISC_USEREVENT; 96 fb_info->flags &= ~FBINFO_MISC_USEREVENT;
97 release_console_sem(); 97 console_unlock();
98 if (err) 98 if (err)
99 return err; 99 return err;
100 return 0; 100 return 0;
@@ -175,7 +175,7 @@ static ssize_t store_modes(struct device *device,
175 if (i * sizeof(struct fb_videomode) != count) 175 if (i * sizeof(struct fb_videomode) != count)
176 return -EINVAL; 176 return -EINVAL;
177 177
178 acquire_console_sem(); 178 console_lock();
179 list_splice(&fb_info->modelist, &old_list); 179 list_splice(&fb_info->modelist, &old_list);
180 fb_videomode_to_modelist((const struct fb_videomode *)buf, i, 180 fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
181 &fb_info->modelist); 181 &fb_info->modelist);
@@ -185,7 +185,7 @@ static ssize_t store_modes(struct device *device,
185 } else 185 } else
186 fb_destroy_modelist(&old_list); 186 fb_destroy_modelist(&old_list);
187 187
188 release_console_sem(); 188 console_unlock();
189 189
190 return 0; 190 return 0;
191} 191}
@@ -301,11 +301,11 @@ static ssize_t store_blank(struct device *device,
301 char *last = NULL; 301 char *last = NULL;
302 int err; 302 int err;
303 303
304 acquire_console_sem(); 304 console_lock();
305 fb_info->flags |= FBINFO_MISC_USEREVENT; 305 fb_info->flags |= FBINFO_MISC_USEREVENT;
306 err = fb_blank(fb_info, simple_strtoul(buf, &last, 0)); 306 err = fb_blank(fb_info, simple_strtoul(buf, &last, 0));
307 fb_info->flags &= ~FBINFO_MISC_USEREVENT; 307 fb_info->flags &= ~FBINFO_MISC_USEREVENT;
308 release_console_sem(); 308 console_unlock();
309 if (err < 0) 309 if (err < 0)
310 return err; 310 return err;
311 return count; 311 return count;
@@ -364,9 +364,9 @@ static ssize_t store_pan(struct device *device,
364 return -EINVAL; 364 return -EINVAL;
365 var.yoffset = simple_strtoul(last, &last, 0); 365 var.yoffset = simple_strtoul(last, &last, 0);
366 366
367 acquire_console_sem(); 367 console_lock();
368 err = fb_pan_display(fb_info, &var); 368 err = fb_pan_display(fb_info, &var);
369 release_console_sem(); 369 console_unlock();
370 370
371 if (err < 0) 371 if (err < 0)
372 return err; 372 return err;
@@ -399,9 +399,9 @@ static ssize_t store_fbstate(struct device *device,
399 399
400 state = simple_strtoul(buf, &last, 0); 400 state = simple_strtoul(buf, &last, 0);
401 401
402 acquire_console_sem(); 402 console_lock();
403 fb_set_suspend(fb_info, (int)state); 403 fb_set_suspend(fb_info, (int)state);
404 release_console_sem(); 404 console_unlock();
405 405
406 return count; 406 return count;
407} 407}
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 70b1d9d51c96..b4f19db9bb54 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -344,10 +344,10 @@ static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
344 struct fb_info *info = pci_get_drvdata(pdev); 344 struct fb_info *info = pci_get_drvdata(pdev);
345 345
346 if (state.event == PM_EVENT_SUSPEND) { 346 if (state.event == PM_EVENT_SUSPEND) {
347 acquire_console_sem(); 347 console_lock();
348 gx_powerdown(info); 348 gx_powerdown(info);
349 fb_set_suspend(info, 1); 349 fb_set_suspend(info, 1);
350 release_console_sem(); 350 console_unlock();
351 } 351 }
352 352
353 /* there's no point in setting PCI states; we emulate PCI, so 353 /* there's no point in setting PCI states; we emulate PCI, so
@@ -361,7 +361,7 @@ static int gxfb_resume(struct pci_dev *pdev)
361 struct fb_info *info = pci_get_drvdata(pdev); 361 struct fb_info *info = pci_get_drvdata(pdev);
362 int ret; 362 int ret;
363 363
364 acquire_console_sem(); 364 console_lock();
365 ret = gx_powerup(info); 365 ret = gx_powerup(info);
366 if (ret) { 366 if (ret) {
367 printk(KERN_ERR "gxfb: power up failed!\n"); 367 printk(KERN_ERR "gxfb: power up failed!\n");
@@ -369,7 +369,7 @@ static int gxfb_resume(struct pci_dev *pdev)
369 } 369 }
370 370
371 fb_set_suspend(info, 0); 371 fb_set_suspend(info, 0);
372 release_console_sem(); 372 console_unlock();
373 return 0; 373 return 0;
374} 374}
375#endif 375#endif
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 39bdbedf43b4..416851ca8754 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -465,10 +465,10 @@ static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state)
465 struct fb_info *info = pci_get_drvdata(pdev); 465 struct fb_info *info = pci_get_drvdata(pdev);
466 466
467 if (state.event == PM_EVENT_SUSPEND) { 467 if (state.event == PM_EVENT_SUSPEND) {
468 acquire_console_sem(); 468 console_lock();
469 lx_powerdown(info); 469 lx_powerdown(info);
470 fb_set_suspend(info, 1); 470 fb_set_suspend(info, 1);
471 release_console_sem(); 471 console_unlock();
472 } 472 }
473 473
474 /* there's no point in setting PCI states; we emulate PCI, so 474 /* there's no point in setting PCI states; we emulate PCI, so
@@ -482,7 +482,7 @@ static int lxfb_resume(struct pci_dev *pdev)
482 struct fb_info *info = pci_get_drvdata(pdev); 482 struct fb_info *info = pci_get_drvdata(pdev);
483 int ret; 483 int ret;
484 484
485 acquire_console_sem(); 485 console_lock();
486 ret = lx_powerup(info); 486 ret = lx_powerup(info);
487 if (ret) { 487 if (ret) {
488 printk(KERN_ERR "lxfb: power up failed!\n"); 488 printk(KERN_ERR "lxfb: power up failed!\n");
@@ -490,7 +490,7 @@ static int lxfb_resume(struct pci_dev *pdev)
490 } 490 }
491 491
492 fb_set_suspend(info, 0); 492 fb_set_suspend(info, 0);
493 release_console_sem(); 493 console_unlock();
494 return 0; 494 return 0;
495} 495}
496#else 496#else
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 5743ea25e818..318f6fb895b2 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1574,7 +1574,7 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t mesg)
1574 return 0; 1574 return 0;
1575 } 1575 }
1576 1576
1577 acquire_console_sem(); 1577 console_lock();
1578 fb_set_suspend(info, 1); 1578 fb_set_suspend(info, 1);
1579 1579
1580 if (info->fbops->fb_sync) 1580 if (info->fbops->fb_sync)
@@ -1587,7 +1587,7 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t mesg)
1587 pci_save_state(dev); 1587 pci_save_state(dev);
1588 pci_disable_device(dev); 1588 pci_disable_device(dev);
1589 pci_set_power_state(dev, pci_choose_state(dev, mesg)); 1589 pci_set_power_state(dev, pci_choose_state(dev, mesg));
1590 release_console_sem(); 1590 console_unlock();
1591 1591
1592 return 0; 1592 return 0;
1593} 1593}
@@ -1605,7 +1605,7 @@ static int i810fb_resume(struct pci_dev *dev)
1605 return 0; 1605 return 0;
1606 } 1606 }
1607 1607
1608 acquire_console_sem(); 1608 console_lock();
1609 pci_set_power_state(dev, PCI_D0); 1609 pci_set_power_state(dev, PCI_D0);
1610 pci_restore_state(dev); 1610 pci_restore_state(dev);
1611 1611
@@ -1621,7 +1621,7 @@ static int i810fb_resume(struct pci_dev *dev)
1621 fb_set_suspend (info, 0); 1621 fb_set_suspend (info, 0);
1622 info->fbops->fb_blank(VESA_NO_BLANKING, info); 1622 info->fbops->fb_blank(VESA_NO_BLANKING, info);
1623fail: 1623fail:
1624 release_console_sem(); 1624 console_unlock();
1625 return 0; 1625 return 0;
1626} 1626}
1627/*********************************************************************** 1627/***********************************************************************
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 670ecaa0385a..de366937c933 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -778,9 +778,9 @@ static int jzfb_suspend(struct device *dev)
778{ 778{
779 struct jzfb *jzfb = dev_get_drvdata(dev); 779 struct jzfb *jzfb = dev_get_drvdata(dev);
780 780
781 acquire_console_sem(); 781 console_lock();
782 fb_set_suspend(jzfb->fb, 1); 782 fb_set_suspend(jzfb->fb, 1);
783 release_console_sem(); 783 console_unlock();
784 784
785 mutex_lock(&jzfb->lock); 785 mutex_lock(&jzfb->lock);
786 if (jzfb->is_enabled) 786 if (jzfb->is_enabled)
@@ -800,9 +800,9 @@ static int jzfb_resume(struct device *dev)
800 jzfb_enable(jzfb); 800 jzfb_enable(jzfb);
801 mutex_unlock(&jzfb->lock); 801 mutex_unlock(&jzfb->lock);
802 802
803 acquire_console_sem(); 803 console_lock();
804 fb_set_suspend(jzfb->fb, 0); 804 fb_set_suspend(jzfb->fb, 0);
805 release_console_sem(); 805 console_unlock();
806 806
807 return 0; 807 return 0;
808} 808}
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index cb013919e9ce..7e3a490e8d76 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1177,9 +1177,9 @@ static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state)
1177 struct mx3fb_data *mx3fb = platform_get_drvdata(pdev); 1177 struct mx3fb_data *mx3fb = platform_get_drvdata(pdev);
1178 struct mx3fb_info *mx3_fbi = mx3fb->fbi->par; 1178 struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
1179 1179
1180 acquire_console_sem(); 1180 console_lock();
1181 fb_set_suspend(mx3fb->fbi, 1); 1181 fb_set_suspend(mx3fb->fbi, 1);
1182 release_console_sem(); 1182 console_unlock();
1183 1183
1184 if (mx3_fbi->blank == FB_BLANK_UNBLANK) { 1184 if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
1185 sdc_disable_channel(mx3_fbi); 1185 sdc_disable_channel(mx3_fbi);
@@ -1202,9 +1202,9 @@ static int mx3fb_resume(struct platform_device *pdev)
1202 sdc_set_brightness(mx3fb, mx3fb->backlight_level); 1202 sdc_set_brightness(mx3fb, mx3fb->backlight_level);
1203 } 1203 }
1204 1204
1205 acquire_console_sem(); 1205 console_lock();
1206 fb_set_suspend(mx3fb->fbi, 0); 1206 fb_set_suspend(mx3fb->fbi, 0);
1207 release_console_sem(); 1207 console_unlock();
1208 1208
1209 return 0; 1209 return 0;
1210} 1210}
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 62498bd662fc..f838d9e277f0 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -696,6 +696,8 @@ static int nuc900fb_remove(struct platform_device *pdev)
696 nuc900fb_stop_lcd(fbinfo); 696 nuc900fb_stop_lcd(fbinfo);
697 msleep(1); 697 msleep(1);
698 698
699 unregister_framebuffer(fbinfo);
700 nuc900fb_cpufreq_deregister(fbi);
699 nuc900fb_unmap_video_memory(fbinfo); 701 nuc900fb_unmap_video_memory(fbinfo);
700 702
701 iounmap(fbi->io); 703 iounmap(fbi->io);
@@ -723,7 +725,7 @@ static int nuc900fb_suspend(struct platform_device *dev, pm_message_t state)
723 struct fb_info *fbinfo = platform_get_drvdata(dev); 725 struct fb_info *fbinfo = platform_get_drvdata(dev);
724 struct nuc900fb_info *info = fbinfo->par; 726 struct nuc900fb_info *info = fbinfo->par;
725 727
726 nuc900fb_stop_lcd(); 728 nuc900fb_stop_lcd(fbinfo);
727 msleep(1); 729 msleep(1);
728 clk_disable(info->clk); 730 clk_disable(info->clk);
729 return 0; 731 return 0;
@@ -740,7 +742,7 @@ static int nuc900fb_resume(struct platform_device *dev)
740 msleep(1); 742 msleep(1);
741 743
742 nuc900fb_init_registers(fbinfo); 744 nuc900fb_init_registers(fbinfo);
743 nuc900fb_activate_var(bfinfo); 745 nuc900fb_activate_var(fbinfo);
744 746
745 return 0; 747 return 0;
746} 748}
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index efe10ff86d63..081dc4745274 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1057,7 +1057,7 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
1057 1057
1058 if (mesg.event == PM_EVENT_PRETHAW) 1058 if (mesg.event == PM_EVENT_PRETHAW)
1059 mesg.event = PM_EVENT_FREEZE; 1059 mesg.event = PM_EVENT_FREEZE;
1060 acquire_console_sem(); 1060 console_lock();
1061 par->pm_state = mesg.event; 1061 par->pm_state = mesg.event;
1062 1062
1063 if (mesg.event & PM_EVENT_SLEEP) { 1063 if (mesg.event & PM_EVENT_SLEEP) {
@@ -1070,7 +1070,7 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
1070 } 1070 }
1071 dev->dev.power.power_state = mesg; 1071 dev->dev.power.power_state = mesg;
1072 1072
1073 release_console_sem(); 1073 console_unlock();
1074 return 0; 1074 return 0;
1075} 1075}
1076 1076
@@ -1079,7 +1079,7 @@ static int nvidiafb_resume(struct pci_dev *dev)
1079 struct fb_info *info = pci_get_drvdata(dev); 1079 struct fb_info *info = pci_get_drvdata(dev);
1080 struct nvidia_par *par = info->par; 1080 struct nvidia_par *par = info->par;
1081 1081
1082 acquire_console_sem(); 1082 console_lock();
1083 pci_set_power_state(dev, PCI_D0); 1083 pci_set_power_state(dev, PCI_D0);
1084 1084
1085 if (par->pm_state != PM_EVENT_FREEZE) { 1085 if (par->pm_state != PM_EVENT_FREEZE) {
@@ -1097,7 +1097,7 @@ static int nvidiafb_resume(struct pci_dev *dev)
1097 nvidiafb_blank(FB_BLANK_UNBLANK, info); 1097 nvidiafb_blank(FB_BLANK_UNBLANK, info);
1098 1098
1099fail: 1099fail:
1100 release_console_sem(); 1100 console_unlock();
1101 return 0; 1101 return 0;
1102} 1102}
1103#else 1103#else
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 9c0144ee7ae5..65560a1a0439 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -513,9 +513,9 @@ static int ps3fb_release(struct fb_info *info, int user)
513 if (atomic_dec_and_test(&ps3fb.f_count)) { 513 if (atomic_dec_and_test(&ps3fb.f_count)) {
514 if (atomic_read(&ps3fb.ext_flip)) { 514 if (atomic_read(&ps3fb.ext_flip)) {
515 atomic_set(&ps3fb.ext_flip, 0); 515 atomic_set(&ps3fb.ext_flip, 0);
516 if (!try_acquire_console_sem()) { 516 if (console_trylock()) {
517 ps3fb_sync(info, 0); /* single buffer */ 517 ps3fb_sync(info, 0); /* single buffer */
518 release_console_sem(); 518 console_unlock();
519 } 519 }
520 } 520 }
521 } 521 }
@@ -830,14 +830,14 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
830 if (vmode) { 830 if (vmode) {
831 var = info->var; 831 var = info->var;
832 fb_videomode_to_var(&var, vmode); 832 fb_videomode_to_var(&var, vmode);
833 acquire_console_sem(); 833 console_lock();
834 info->flags |= FBINFO_MISC_USEREVENT; 834 info->flags |= FBINFO_MISC_USEREVENT;
835 /* Force, in case only special bits changed */ 835 /* Force, in case only special bits changed */
836 var.activate |= FB_ACTIVATE_FORCE; 836 var.activate |= FB_ACTIVATE_FORCE;
837 par->new_mode_id = val; 837 par->new_mode_id = val;
838 retval = fb_set_var(info, &var); 838 retval = fb_set_var(info, &var);
839 info->flags &= ~FBINFO_MISC_USEREVENT; 839 info->flags &= ~FBINFO_MISC_USEREVENT;
840 release_console_sem(); 840 console_unlock();
841 } 841 }
842 break; 842 break;
843 } 843 }
@@ -881,9 +881,9 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
881 break; 881 break;
882 882
883 dev_dbg(info->device, "PS3FB_IOCTL_FSEL:%d\n", val); 883 dev_dbg(info->device, "PS3FB_IOCTL_FSEL:%d\n", val);
884 acquire_console_sem(); 884 console_lock();
885 retval = ps3fb_sync(info, val); 885 retval = ps3fb_sync(info, val);
886 release_console_sem(); 886 console_unlock();
887 break; 887 break;
888 888
889 default: 889 default:
@@ -903,9 +903,9 @@ static int ps3fbd(void *arg)
903 set_current_state(TASK_INTERRUPTIBLE); 903 set_current_state(TASK_INTERRUPTIBLE);
904 if (ps3fb.is_kicked) { 904 if (ps3fb.is_kicked) {
905 ps3fb.is_kicked = 0; 905 ps3fb.is_kicked = 0;
906 acquire_console_sem(); 906 console_lock();
907 ps3fb_sync(info, 0); /* single buffer */ 907 ps3fb_sync(info, 0); /* single buffer */
908 release_console_sem(); 908 console_unlock();
909 } 909 }
910 schedule(); 910 schedule();
911 } 911 }
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index cea6403ae71c..35f61dd0cb3a 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -701,16 +701,12 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
701 */ 701 */
702 pxa168fb_init_mode(info, mi); 702 pxa168fb_init_mode(info, mi);
703 703
704 ret = pxa168fb_check_var(&info->var, info);
705 if (ret)
706 goto failed_free_fbmem;
707
708 /* 704 /*
709 * Fill in sane defaults. 705 * Fill in sane defaults.
710 */ 706 */
711 ret = pxa168fb_check_var(&info->var, info); 707 ret = pxa168fb_check_var(&info->var, info);
712 if (ret) 708 if (ret)
713 goto failed; 709 goto failed_free_fbmem;
714 710
715 /* 711 /*
716 * enable controller clock 712 * enable controller clock
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index b81168df253d..cf4beb9dc9bb 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * pxa3xx-gc.c - Linux kernel module for PXA3xx graphics controllers 2 * pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
3 * 3 *
4 * This driver needs a DirectFB counterpart in user space, communication 4 * This driver needs a DirectFB counterpart in user space, communication
5 * is handled via mmap()ed memory areas and an ioctl. 5 * is handled via mmap()ed memory areas and an ioctl.
@@ -421,7 +421,7 @@ pxa3xx_gcu_misc_write(struct file *filp, const char *buff,
421 buffer->next = priv->free; 421 buffer->next = priv->free;
422 priv->free = buffer; 422 priv->free = buffer;
423 spin_unlock_irqrestore(&priv->spinlock, flags); 423 spin_unlock_irqrestore(&priv->spinlock, flags);
424 return ret; 424 return -EFAULT;
425 } 425 }
426 426
427 buffer->length = words; 427 buffer->length = words;
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index dce8c97b4333..75738a928610 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -22,7 +22,7 @@
22#include <linux/svga.h> 22#include <linux/svga.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */ 25#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
26#include <video/vga.h> 26#include <video/vga.h>
27 27
28#ifdef CONFIG_MTRR 28#ifdef CONFIG_MTRR
@@ -1113,12 +1113,12 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
1113 1113
1114 dev_info(info->device, "suspend\n"); 1114 dev_info(info->device, "suspend\n");
1115 1115
1116 acquire_console_sem(); 1116 console_lock();
1117 mutex_lock(&(par->open_lock)); 1117 mutex_lock(&(par->open_lock));
1118 1118
1119 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { 1119 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
1120 mutex_unlock(&(par->open_lock)); 1120 mutex_unlock(&(par->open_lock));
1121 release_console_sem(); 1121 console_unlock();
1122 return 0; 1122 return 0;
1123 } 1123 }
1124 1124
@@ -1129,7 +1129,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
1129 pci_set_power_state(dev, pci_choose_state(dev, state)); 1129 pci_set_power_state(dev, pci_choose_state(dev, state));
1130 1130
1131 mutex_unlock(&(par->open_lock)); 1131 mutex_unlock(&(par->open_lock));
1132 release_console_sem(); 1132 console_unlock();
1133 1133
1134 return 0; 1134 return 0;
1135} 1135}
@@ -1145,12 +1145,12 @@ static int s3_pci_resume(struct pci_dev* dev)
1145 1145
1146 dev_info(info->device, "resume\n"); 1146 dev_info(info->device, "resume\n");
1147 1147
1148 acquire_console_sem(); 1148 console_lock();
1149 mutex_lock(&(par->open_lock)); 1149 mutex_lock(&(par->open_lock));
1150 1150
1151 if (par->ref_count == 0) { 1151 if (par->ref_count == 0) {
1152 mutex_unlock(&(par->open_lock)); 1152 mutex_unlock(&(par->open_lock));
1153 release_console_sem(); 1153 console_unlock();
1154 return 0; 1154 return 0;
1155 } 1155 }
1156 1156
@@ -1159,7 +1159,7 @@ static int s3_pci_resume(struct pci_dev* dev)
1159 err = pci_enable_device(dev); 1159 err = pci_enable_device(dev);
1160 if (err) { 1160 if (err) {
1161 mutex_unlock(&(par->open_lock)); 1161 mutex_unlock(&(par->open_lock));
1162 release_console_sem(); 1162 console_unlock();
1163 dev_err(info->device, "error %d enabling device for resume\n", err); 1163 dev_err(info->device, "error %d enabling device for resume\n", err);
1164 return err; 1164 return err;
1165 } 1165 }
@@ -1169,7 +1169,7 @@ static int s3_pci_resume(struct pci_dev* dev)
1169 fb_set_suspend(info, 0); 1169 fb_set_suspend(info, 0);
1170 1170
1171 mutex_unlock(&(par->open_lock)); 1171 mutex_unlock(&(par->open_lock));
1172 release_console_sem(); 1172 console_unlock();
1173 1173
1174 return 0; 1174 return 0;
1175} 1175}
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 842d157e1025..487911e2926c 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2373,7 +2373,7 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
2373 if (mesg.event == PM_EVENT_FREEZE) 2373 if (mesg.event == PM_EVENT_FREEZE)
2374 return 0; 2374 return 0;
2375 2375
2376 acquire_console_sem(); 2376 console_lock();
2377 fb_set_suspend(info, 1); 2377 fb_set_suspend(info, 1);
2378 2378
2379 if (info->fbops->fb_sync) 2379 if (info->fbops->fb_sync)
@@ -2385,7 +2385,7 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
2385 pci_save_state(dev); 2385 pci_save_state(dev);
2386 pci_disable_device(dev); 2386 pci_disable_device(dev);
2387 pci_set_power_state(dev, pci_choose_state(dev, mesg)); 2387 pci_set_power_state(dev, pci_choose_state(dev, mesg));
2388 release_console_sem(); 2388 console_unlock();
2389 2389
2390 return 0; 2390 return 0;
2391} 2391}
@@ -2409,7 +2409,7 @@ static int savagefb_resume(struct pci_dev* dev)
2409 return 0; 2409 return 0;
2410 } 2410 }
2411 2411
2412 acquire_console_sem(); 2412 console_lock();
2413 2413
2414 pci_set_power_state(dev, PCI_D0); 2414 pci_set_power_state(dev, PCI_D0);
2415 pci_restore_state(dev); 2415 pci_restore_state(dev);
@@ -2423,7 +2423,7 @@ static int savagefb_resume(struct pci_dev* dev)
2423 savagefb_set_par(info); 2423 savagefb_set_par(info);
2424 fb_set_suspend(info, 0); 2424 fb_set_suspend(info, 0);
2425 savagefb_blank(FB_BLANK_UNBLANK, info); 2425 savagefb_blank(FB_BLANK_UNBLANK, info);
2426 release_console_sem(); 2426 console_unlock();
2427 2427
2428 return 0; 2428 return 0;
2429} 2429}
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 74d9f546a2e8..2b9e56a6bde4 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1151,7 +1151,7 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1151 1151
1152 ch = info->par; 1152 ch = info->par;
1153 1153
1154 acquire_console_sem(); 1154 console_lock();
1155 1155
1156 /* HDMI plug in */ 1156 /* HDMI plug in */
1157 if (!sh_hdmi_must_reconfigure(hdmi) && 1157 if (!sh_hdmi_must_reconfigure(hdmi) &&
@@ -1171,7 +1171,7 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1171 fb_set_suspend(info, 0); 1171 fb_set_suspend(info, 0);
1172 } 1172 }
1173 1173
1174 release_console_sem(); 1174 console_unlock();
1175 } else { 1175 } else {
1176 ret = 0; 1176 ret = 0;
1177 if (!hdmi->info) 1177 if (!hdmi->info)
@@ -1181,12 +1181,12 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1181 fb_destroy_modedb(hdmi->monspec.modedb); 1181 fb_destroy_modedb(hdmi->monspec.modedb);
1182 hdmi->monspec.modedb = NULL; 1182 hdmi->monspec.modedb = NULL;
1183 1183
1184 acquire_console_sem(); 1184 console_lock();
1185 1185
1186 /* HDMI disconnect */ 1186 /* HDMI disconnect */
1187 fb_set_suspend(hdmi->info, 1); 1187 fb_set_suspend(hdmi->info, 1);
1188 1188
1189 release_console_sem(); 1189 console_unlock();
1190 pm_runtime_put(hdmi->dev); 1190 pm_runtime_put(hdmi->dev);
1191 } 1191 }
1192 1192
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index bd4840a8a6b7..bf12e53aed5c 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -912,9 +912,9 @@ static int sh_mobile_release(struct fb_info *info, int user)
912 912
913 /* Nothing to reconfigure, when called from fbcon */ 913 /* Nothing to reconfigure, when called from fbcon */
914 if (user) { 914 if (user) {
915 acquire_console_sem(); 915 console_lock();
916 sh_mobile_fb_reconfig(info); 916 sh_mobile_fb_reconfig(info);
917 release_console_sem(); 917 console_unlock();
918 } 918 }
919 919
920 mutex_unlock(&ch->open_lock); 920 mutex_unlock(&ch->open_lock);
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index b7dc1800efa9..bcb44a594ebc 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -2010,9 +2010,9 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
2010 2010
2011 /* tell console/fb driver we are suspending */ 2011 /* tell console/fb driver we are suspending */
2012 2012
2013 acquire_console_sem(); 2013 console_lock();
2014 fb_set_suspend(fbi, 1); 2014 fb_set_suspend(fbi, 1);
2015 release_console_sem(); 2015 console_unlock();
2016 2016
2017 /* backup copies in case chip is powered down over suspend */ 2017 /* backup copies in case chip is powered down over suspend */
2018 2018
@@ -2069,9 +2069,9 @@ static void sm501fb_resume_fb(struct sm501fb_info *info,
2069 memcpy_toio(par->cursor.k_addr, par->store_cursor, 2069 memcpy_toio(par->cursor.k_addr, par->store_cursor,
2070 par->cursor.size); 2070 par->cursor.size);
2071 2071
2072 acquire_console_sem(); 2072 console_lock();
2073 fb_set_suspend(fbi, 0); 2073 fb_set_suspend(fbi, 0);
2074 release_console_sem(); 2074 console_unlock();
2075 2075
2076 vfree(par->store_fb); 2076 vfree(par->store_fb);
2077 vfree(par->store_cursor); 2077 vfree(par->store_cursor);
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index 6913fe168c25..dfef88c803d4 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -25,7 +25,7 @@
25#include <linux/fb.h> 25#include <linux/fb.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28/* Why should fb driver call console functions? because acquire_console_sem() */ 28/* Why should fb driver call console functions? because console_lock() */
29#include <linux/console.h> 29#include <linux/console.h>
30#include <linux/mfd/core.h> 30#include <linux/mfd/core.h>
31#include <linux/mfd/tmio.h> 31#include <linux/mfd/tmio.h>
@@ -944,7 +944,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
944 struct mfd_cell *cell = dev->dev.platform_data; 944 struct mfd_cell *cell = dev->dev.platform_data;
945 int retval = 0; 945 int retval = 0;
946 946
947 acquire_console_sem(); 947 console_lock();
948 948
949 fb_set_suspend(info, 1); 949 fb_set_suspend(info, 1);
950 950
@@ -965,7 +965,7 @@ static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
965 if (cell->suspend) 965 if (cell->suspend)
966 retval = cell->suspend(dev); 966 retval = cell->suspend(dev);
967 967
968 release_console_sem(); 968 console_unlock();
969 969
970 return retval; 970 return retval;
971} 971}
@@ -976,7 +976,7 @@ static int tmiofb_resume(struct platform_device *dev)
976 struct mfd_cell *cell = dev->dev.platform_data; 976 struct mfd_cell *cell = dev->dev.platform_data;
977 int retval = 0; 977 int retval = 0;
978 978
979 acquire_console_sem(); 979 console_lock();
980 980
981 if (cell->resume) { 981 if (cell->resume) {
982 retval = cell->resume(dev); 982 retval = cell->resume(dev);
@@ -992,7 +992,7 @@ static int tmiofb_resume(struct platform_device *dev)
992 992
993 fb_set_suspend(info, 0); 993 fb_set_suspend(info, 0);
994out: 994out:
995 release_console_sem(); 995 console_unlock();
996 return retval; 996 return retval;
997} 997}
998#else 998#else
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 289edd519527..4e66349e4366 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1674,17 +1674,17 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
1674#ifdef CONFIG_PM 1674#ifdef CONFIG_PM
1675static int viafb_suspend(void *unused) 1675static int viafb_suspend(void *unused)
1676{ 1676{
1677 acquire_console_sem(); 1677 console_lock();
1678 fb_set_suspend(viafbinfo, 1); 1678 fb_set_suspend(viafbinfo, 1);
1679 viafb_sync(viafbinfo); 1679 viafb_sync(viafbinfo);
1680 release_console_sem(); 1680 console_unlock();
1681 1681
1682 return 0; 1682 return 0;
1683} 1683}
1684 1684
1685static int viafb_resume(void *unused) 1685static int viafb_resume(void *unused)
1686{ 1686{
1687 acquire_console_sem(); 1687 console_lock();
1688 if (viaparinfo->shared->vdev->engine_mmio) 1688 if (viaparinfo->shared->vdev->engine_mmio)
1689 viafb_reset_engine(viaparinfo); 1689 viafb_reset_engine(viaparinfo);
1690 viafb_set_par(viafbinfo); 1690 viafb_set_par(viafbinfo);
@@ -1692,7 +1692,7 @@ static int viafb_resume(void *unused)
1692 viafb_set_par(viafbinfo1); 1692 viafb_set_par(viafbinfo1);
1693 fb_set_suspend(viafbinfo, 0); 1693 fb_set_suspend(viafbinfo, 0);
1694 1694
1695 release_console_sem(); 1695 console_unlock();
1696 return 0; 1696 return 0;
1697} 1697}
1698 1698
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 85d76ec4c63e..a2965ab92cfb 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -23,7 +23,7 @@
23#include <linux/svga.h> 23#include <linux/svga.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */ 26#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
27#include <video/vga.h> 27#include <video/vga.h>
28 28
29#ifdef CONFIG_MTRR 29#ifdef CONFIG_MTRR
@@ -819,12 +819,12 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
819 819
820 dev_info(info->device, "suspend\n"); 820 dev_info(info->device, "suspend\n");
821 821
822 acquire_console_sem(); 822 console_lock();
823 mutex_lock(&(par->open_lock)); 823 mutex_lock(&(par->open_lock));
824 824
825 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) { 825 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
826 mutex_unlock(&(par->open_lock)); 826 mutex_unlock(&(par->open_lock));
827 release_console_sem(); 827 console_unlock();
828 return 0; 828 return 0;
829 } 829 }
830 830
@@ -835,7 +835,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
835 pci_set_power_state(dev, pci_choose_state(dev, state)); 835 pci_set_power_state(dev, pci_choose_state(dev, state));
836 836
837 mutex_unlock(&(par->open_lock)); 837 mutex_unlock(&(par->open_lock));
838 release_console_sem(); 838 console_unlock();
839 839
840 return 0; 840 return 0;
841} 841}
@@ -850,7 +850,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
850 850
851 dev_info(info->device, "resume\n"); 851 dev_info(info->device, "resume\n");
852 852
853 acquire_console_sem(); 853 console_lock();
854 mutex_lock(&(par->open_lock)); 854 mutex_lock(&(par->open_lock));
855 855
856 if (par->ref_count == 0) 856 if (par->ref_count == 0)
@@ -869,7 +869,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
869 869
870fail: 870fail:
871 mutex_unlock(&(par->open_lock)); 871 mutex_unlock(&(par->open_lock));
872 release_console_sem(); 872 console_unlock();
873 873
874 return 0; 874 return 0;
875} 875}
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 3e6934d4bea8..a20218c2fda8 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -491,12 +491,12 @@ xenfb_make_preferred_console(void)
491 if (console_set_on_cmdline) 491 if (console_set_on_cmdline)
492 return; 492 return;
493 493
494 acquire_console_sem(); 494 console_lock();
495 for_each_console(c) { 495 for_each_console(c) {
496 if (!strcmp(c->name, "tty") && c->index == 0) 496 if (!strcmp(c->name, "tty") && c->index == 0)
497 break; 497 break;
498 } 498 }
499 release_console_sem(); 499 console_unlock();
500 if (c) { 500 if (c) {
501 unregister_console(c); 501 unregister_console(c);
502 c->flags |= CON_CONSDEV; 502 c->flags |= CON_CONSDEV;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index ef8d9d558fc7..4fb5b2bf2348 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -96,11 +96,6 @@ static struct pci_device_id virtio_pci_id_table[] = {
96 96
97MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); 97MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
98 98
99/* A PCI device has it's own struct device and so does a virtio device so
100 * we create a place for the virtio devices to show up in sysfs. I think it
101 * would make more sense for virtio to not insist on having it's own device. */
102static struct device *virtio_pci_root;
103
104/* Convert a generic virtio device to our structure */ 99/* Convert a generic virtio device to our structure */
105static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) 100static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
106{ 101{
@@ -629,7 +624,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
629 if (vp_dev == NULL) 624 if (vp_dev == NULL)
630 return -ENOMEM; 625 return -ENOMEM;
631 626
632 vp_dev->vdev.dev.parent = virtio_pci_root; 627 vp_dev->vdev.dev.parent = &pci_dev->dev;
633 vp_dev->vdev.dev.release = virtio_pci_release_dev; 628 vp_dev->vdev.dev.release = virtio_pci_release_dev;
634 vp_dev->vdev.config = &virtio_pci_config_ops; 629 vp_dev->vdev.config = &virtio_pci_config_ops;
635 vp_dev->pci_dev = pci_dev; 630 vp_dev->pci_dev = pci_dev;
@@ -717,17 +712,7 @@ static struct pci_driver virtio_pci_driver = {
717 712
718static int __init virtio_pci_init(void) 713static int __init virtio_pci_init(void)
719{ 714{
720 int err; 715 return pci_register_driver(&virtio_pci_driver);
721
722 virtio_pci_root = root_device_register("virtio-pci");
723 if (IS_ERR(virtio_pci_root))
724 return PTR_ERR(virtio_pci_root);
725
726 err = pci_register_driver(&virtio_pci_driver);
727 if (err)
728 root_device_unregister(virtio_pci_root);
729
730 return err;
731} 716}
732 717
733module_init(virtio_pci_init); 718module_init(virtio_pci_init);
@@ -735,7 +720,6 @@ module_init(virtio_pci_init);
735static void __exit virtio_pci_exit(void) 720static void __exit virtio_pci_exit(void)
736{ 721{
737 pci_unregister_driver(&virtio_pci_driver); 722 pci_unregister_driver(&virtio_pci_driver);
738 root_device_unregister(virtio_pci_root);
739} 723}
740 724
741module_exit(virtio_pci_exit); 725module_exit(virtio_pci_exit);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3a7e9ff8a746..38e96ab90945 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -593,19 +593,17 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
593 593
594 /* get interface & functional clock objects */ 594 /* get interface & functional clock objects */
595 hdq_data->hdq_ick = clk_get(&pdev->dev, "ick"); 595 hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
596 hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); 596 if (IS_ERR(hdq_data->hdq_ick)) {
597 dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
598 ret = PTR_ERR(hdq_data->hdq_ick);
599 goto err_ick;
600 }
597 601
598 if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) { 602 hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
599 dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n"); 603 if (IS_ERR(hdq_data->hdq_fck)) {
600 if (IS_ERR(hdq_data->hdq_ick)) { 604 dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
601 ret = PTR_ERR(hdq_data->hdq_ick); 605 ret = PTR_ERR(hdq_data->hdq_fck);
602 goto err_clk; 606 goto err_fck;
603 }
604 if (IS_ERR(hdq_data->hdq_fck)) {
605 ret = PTR_ERR(hdq_data->hdq_fck);
606 clk_put(hdq_data->hdq_ick);
607 goto err_clk;
608 }
609 } 607 }
610 608
611 hdq_data->hdq_usecount = 0; 609 hdq_data->hdq_usecount = 0;
@@ -665,10 +663,12 @@ err_fnclk:
665 clk_disable(hdq_data->hdq_ick); 663 clk_disable(hdq_data->hdq_ick);
666 664
667err_intfclk: 665err_intfclk:
668 clk_put(hdq_data->hdq_ick);
669 clk_put(hdq_data->hdq_fck); 666 clk_put(hdq_data->hdq_fck);
670 667
671err_clk: 668err_fck:
669 clk_put(hdq_data->hdq_ick);
670
671err_ick:
672 iounmap(hdq_data->hdq_base); 672 iounmap(hdq_data->hdq_base);
673 673
674err_ioremap: 674err_ioremap:
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 5a48ce996dea..07bec09d1dad 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -71,11 +71,18 @@ config XEN_SYS_HYPERVISOR
71 but will have no xen contents. 71 but will have no xen contents.
72 72
73config XEN_XENBUS_FRONTEND 73config XEN_XENBUS_FRONTEND
74 tristate 74 tristate
75
76config XEN_GNTDEV
77 tristate "userspace grant access device driver"
78 depends on XEN
79 select MMU_NOTIFIER
80 help
81 Allows userspace processes to use grants.
75 82
76config XEN_PLATFORM_PCI 83config XEN_PLATFORM_PCI
77 tristate "xen platform pci device driver" 84 tristate "xen platform pci device driver"
78 depends on XEN_PVHVM 85 depends on XEN_PVHVM && PCI
79 default m 86 default m
80 help 87 help
81 Driver for the Xen PCI Platform device: it is responsible for 88 Driver for the Xen PCI Platform device: it is responsible for
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 533a199e7a3f..5088cc2e6fe2 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -9,11 +9,14 @@ obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
9obj-$(CONFIG_XEN_XENCOMM) += xencomm.o 9obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
10obj-$(CONFIG_XEN_BALLOON) += balloon.o 10obj-$(CONFIG_XEN_BALLOON) += balloon.o
11obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o 11obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
12obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
12obj-$(CONFIG_XENFS) += xenfs/ 13obj-$(CONFIG_XENFS) += xenfs/
13obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o 14obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
14obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o 15obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
15obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o 16obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
16obj-$(CONFIG_XEN_DOM0) += pci.o 17obj-$(CONFIG_XEN_DOM0) += pci.o
17 18
18xen-evtchn-y := evtchn.o 19xen-evtchn-y := evtchn.o
20xen-gntdev-y := gntdev.o
19 21
22xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
new file mode 100644
index 000000000000..1e31cdcdae1e
--- /dev/null
+++ b/drivers/xen/gntdev.c
@@ -0,0 +1,665 @@
1/******************************************************************************
2 * gntdev.c
3 *
4 * Device for accessing (in user-space) pages that have been granted by other
5 * domains.
6 *
7 * Copyright (c) 2006-2007, D G Murray.
8 * (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#undef DEBUG
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/miscdevice.h>
26#include <linux/fs.h>
27#include <linux/mm.h>
28#include <linux/mman.h>
29#include <linux/mmu_notifier.h>
30#include <linux/types.h>
31#include <linux/uaccess.h>
32#include <linux/sched.h>
33#include <linux/spinlock.h>
34#include <linux/slab.h>
35
36#include <xen/xen.h>
37#include <xen/grant_table.h>
38#include <xen/gntdev.h>
39#include <asm/xen/hypervisor.h>
40#include <asm/xen/hypercall.h>
41#include <asm/xen/page.h>
42
43MODULE_LICENSE("GPL");
44MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
45 "Gerd Hoffmann <kraxel@redhat.com>");
46MODULE_DESCRIPTION("User-space granted page access driver");
47
48static int limit = 1024;
49module_param(limit, int, 0644);
50MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at "
51 "once by a gntdev instance");
52
/* Per-open-file ("instance") state for the gntdev character device. */
struct gntdev_priv {
	struct list_head maps;	/* list of struct grant_map, kept sorted by index */
	uint32_t used;		/* grants currently accounted across all maps */
	uint32_t limit;		/* per-instance cap on mapped grants */
	/* lock protects maps from concurrent changes */
	spinlock_t lock;
	struct mm_struct *mm;	/* mm of the opening task (notifier target) */
	struct mmu_notifier mn;	/* unmaps grants when the mm is invalidated */
};

/* One contiguous range of foreign-granted pages registered by an
 * instance; 'count' pages starting at pseudo-offset 'index'. */
struct grant_map {
	struct list_head next;	/* link on gntdev_priv.maps */
	struct gntdev_priv *priv;	/* owning instance */
	struct vm_area_struct *vma;	/* vma currently mapping this range, or NULL */
	int index;		/* first page index within the instance */
	int count;		/* number of pages in the range */
	int flags;		/* GNTMAP_* flags used for the mapping */
	int is_mapped;		/* nonzero once map_grant_pages() succeeded */
	struct ioctl_gntdev_grant_ref *grants;	/* per page: (domid, ref) from user */
	struct gnttab_map_grant_ref *map_ops;	/* per page: map hypercall args */
	struct gnttab_unmap_grant_ref *unmap_ops; /* per page: unmap args + live handle */
	struct page **pages;	/* per page: backing struct page */
};
76
77/* ------------------------------------------------------------------ */
78
/* Dump the instance's map list via pr_debug; compiled out unless DEBUG. */
static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct grant_map *entry;

	pr_debug("maps list (priv %p, usage %d/%d)\n",
		 priv, priv->used, priv->limit);

	list_for_each_entry(entry, &priv->maps, next) {
		const char *tag = "";

		if (text && entry->index == text_index)
			tag = text;
		pr_debug("  index %2d, count %2d %s\n",
			 entry->index, entry->count, tag);
	}
#endif
}
94
95static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
96{
97 struct grant_map *add;
98 int i;
99
100 add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
101 if (NULL == add)
102 return NULL;
103
104 add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
105 add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
106 add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
107 add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL);
108 if (NULL == add->grants ||
109 NULL == add->map_ops ||
110 NULL == add->unmap_ops ||
111 NULL == add->pages)
112 goto err;
113
114 for (i = 0; i < count; i++) {
115 add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
116 if (add->pages[i] == NULL)
117 goto err;
118 }
119
120 add->index = 0;
121 add->count = count;
122 add->priv = priv;
123
124 if (add->count + priv->used > priv->limit)
125 goto err;
126
127 return add;
128
129err:
130 if (add->pages)
131 for (i = 0; i < count; i++) {
132 if (add->pages[i])
133 __free_page(add->pages[i]);
134 }
135 kfree(add->pages);
136 kfree(add->grants);
137 kfree(add->map_ops);
138 kfree(add->unmap_ops);
139 kfree(add);
140 return NULL;
141}
142
/*
 * Insert @add into the instance's index-sorted map list and assign its
 * page index: the first gap between existing maps that can hold
 * add->count pages (note the strict '<' -- the gap must exceed the
 * count), otherwise immediately after the last map.  Caller must hold
 * priv->lock.
 */
static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		/* does @add fit before this map at its current index? */
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		/* no: try the slot right after this map */
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	priv->used += add->count;
	gntdev_print_maps(priv, "[new]", add->index);
}
160
161static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
162 int index, int count)
163{
164 struct grant_map *map;
165
166 list_for_each_entry(map, &priv->maps, next) {
167 if (map->index != index)
168 continue;
169 if (map->count != count)
170 continue;
171 return map;
172 }
173 return NULL;
174}
175
176static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
177 unsigned long vaddr)
178{
179 struct grant_map *map;
180
181 list_for_each_entry(map, &priv->maps, next) {
182 if (!map->vma)
183 continue;
184 if (vaddr < map->vma->vm_start)
185 continue;
186 if (vaddr >= map->vma->vm_end)
187 continue;
188 return map;
189 }
190 return NULL;
191}
192
/*
 * Unlink @map from its instance's list.  Returns -EBUSY while the
 * range is still mmap'ed or any grant handle is still live.  Does NOT
 * free the map; the caller does that (outside the lock).  Caller must
 * hold map->priv->lock.
 */
static int gntdev_del_map(struct grant_map *map)
{
	int i;

	if (map->vma)
		return -EBUSY;
	for (i = 0; i < map->count; i++)
		if (map->unmap_ops[i].handle)
			return -EBUSY;

	map->priv->used -= map->count;
	list_del(&map->next);
	return 0;
}
207
208static void gntdev_free_map(struct grant_map *map)
209{
210 int i;
211
212 if (!map)
213 return;
214
215 if (map->pages)
216 for (i = 0; i < map->count; i++) {
217 if (map->pages[i])
218 __free_page(map->pages[i]);
219 }
220 kfree(map->pages);
221 kfree(map->grants);
222 kfree(map->map_ops);
223 kfree(map->unmap_ops);
224 kfree(map);
225}
226
227/* ------------------------------------------------------------------ */
228
/*
 * apply_to_page_range() callback: for each pte covering the vma, fill
 * in the map/unmap hypercall arguments so the hypervisor installs the
 * granted frame directly into that pte (GNTMAP_contains_pte).
 */
static int find_grant_ptes(pte_t *pte, pgtable_t token,
		unsigned long addr, void *data)
{
	struct grant_map *map = data;
	/* page number of @addr within the mapped range */
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	/* machine address of the pte itself -- the hypercall target */
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
			  GNTMAP_contains_pte | map->flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
			    GNTMAP_contains_pte | map->flags,
			    0 /* handle */);
	return 0;
}
248
/*
 * Issue the batched map hypercall for all pages of @map.  Returns the
 * hypercall error, or -EINVAL if any individual grant failed (nonzero
 * per-op status).  Handles are copied into the matching unmap_ops so
 * whatever did get mapped can be torn down later.
 */
static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status)
			err = -EINVAL;
		/* save the handle even on partial failure, for cleanup */
		map->unmap_ops[i].handle = map->map_ops[i].handle;
	}
	return err;
}
265
266static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
267{
268 int i, err = 0;
269
270 pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
271 err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
272 if (err)
273 return err;
274
275 for (i = 0; i < pages; i++) {
276 if (map->unmap_ops[offset+i].status)
277 err = -EINVAL;
278 map->unmap_ops[offset+i].handle = 0;
279 }
280 return err;
281}
282
283/* ------------------------------------------------------------------ */
284
/*
 * vma close: sever the link between the vma and its grant_map.
 * NOTE(review): the grant handles are not unmapped here; teardown
 * appears to rely on the mmu-notifier invalidate callbacks firing for
 * this range -- confirm.
 */
static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;

	pr_debug("close %p\n", vma);
	map->is_mapped = 0;
	map->vma = NULL;
	vma->vm_private_data = NULL;
}
294
295static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
296{
297 pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
298 vmf->virtual_address, vmf->pgoff);
299 vmf->flags = VM_FAULT_ERROR;
300 return 0;
301}
302
/* vma callbacks for gntdev mappings; ptes are pre-populated at mmap
 * time, so .fault only fires for out-of-range accesses. */
static struct vm_operations_struct gntdev_vmops = {
	.close = gntdev_vma_close,
	.fault = gntdev_vma_fault,
};
307
308/* ------------------------------------------------------------------ */
309
/*
 * MMU notifier: [start, end) of the address space is being
 * invalidated.  Unmap the grants of every mapped range that overlaps,
 * clamped to the overlap, so the hypervisor mapping never outlives the
 * ptes.  Takes priv->lock to walk the map list.
 */
static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	unsigned long mstart, mend;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		if (!map->is_mapped)
			continue;
		/* skip ranges that do not intersect [start, end) */
		if (map->vma->vm_start >= end)
			continue;
		if (map->vma->vm_end <= start)
			continue;
		/* clamp to the intersection of range and vma */
		mstart = max(start, map->vma->vm_start);
		mend   = min(end,   map->vma->vm_end);
		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end,
				start, end, mstart, mend);
		err = unmap_grant_pages(map,
					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
					(mend - mstart) >> PAGE_SHIFT);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}
342
/* Single-page invalidation: delegate to the range handler. */
static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}
349
/*
 * MMU notifier: the whole mm is going away.  Unmap every grant range
 * that still has a vma attached.
 */
static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}
369
370struct mmu_notifier_ops gntdev_mmu_ops = {
371 .release = mn_release,
372 .invalidate_page = mn_invl_page,
373 .invalidate_range_start = mn_invl_range_start,
374};
375
376/* ------------------------------------------------------------------ */
377
/*
 * Open the gntdev device: allocate per-file state, record the opener's
 * mm, and register an mmu notifier on it so grant mappings can be torn
 * down when the address space is invalidated.
 */
static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	spin_lock_init(&priv->lock);
	priv->limit = limit;

	/* no mm (e.g. kernel thread) is reported as -ENOMEM */
	priv->mm = get_task_mm(current);
	if (!priv->mm) {
		kfree(priv);
		return -ENOMEM;
	}
	priv->mn.ops = &gntdev_mmu_ops;
	ret = mmu_notifier_register(&priv->mn, priv->mm);
	/* drop the reference taken by get_task_mm(); NOTE(review): the
	 * bare priv->mm pointer is used later in mmap/release -- relies
	 * on the notifier's mm lifetime, confirm */
	mmput(priv->mm);

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
	pr_debug("priv %p\n", priv);

	return 0;
}
410
411static int gntdev_release(struct inode *inode, struct file *flip)
412{
413 struct gntdev_priv *priv = flip->private_data;
414 struct grant_map *map;
415 int err;
416
417 pr_debug("priv %p\n", priv);
418
419 spin_lock(&priv->lock);
420 while (!list_empty(&priv->maps)) {
421 map = list_entry(priv->maps.next, struct grant_map, next);
422 err = gntdev_del_map(map);
423 if (WARN_ON(err))
424 gntdev_free_map(map);
425
426 }
427 spin_unlock(&priv->lock);
428
429 mmu_notifier_unregister(&priv->mn, priv->mm);
430 kfree(priv);
431 return 0;
432}
433
434static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
435 struct ioctl_gntdev_map_grant_ref __user *u)
436{
437 struct ioctl_gntdev_map_grant_ref op;
438 struct grant_map *map;
439 int err;
440
441 if (copy_from_user(&op, u, sizeof(op)) != 0)
442 return -EFAULT;
443 pr_debug("priv %p, add %d\n", priv, op.count);
444 if (unlikely(op.count <= 0))
445 return -EINVAL;
446 if (unlikely(op.count > priv->limit))
447 return -EINVAL;
448
449 err = -ENOMEM;
450 map = gntdev_alloc_map(priv, op.count);
451 if (!map)
452 return err;
453 if (copy_from_user(map->grants, &u->refs,
454 sizeof(map->grants[0]) * op.count) != 0) {
455 gntdev_free_map(map);
456 return err;
457 }
458
459 spin_lock(&priv->lock);
460 gntdev_add_map(priv, map);
461 op.index = map->index << PAGE_SHIFT;
462 spin_unlock(&priv->lock);
463
464 if (copy_to_user(u, &op, sizeof(op)) != 0) {
465 spin_lock(&priv->lock);
466 gntdev_del_map(map);
467 spin_unlock(&priv->lock);
468 gntdev_free_map(map);
469 return err;
470 }
471 return 0;
472}
473
/*
 * IOCTL_GNTDEV_UNMAP_GRANT_REF: delete the map whose index/count match
 * the request.  -ENOENT if no such map; -EBUSY if it is still mmap'ed
 * or holds live grant handles.  The map is freed outside the lock.
 */
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map)
		err = gntdev_del_map(map);
	spin_unlock(&priv->lock);
	if (!err)
		gntdev_free_map(map);
	return err;
}
494
/*
 * IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR: given the start address of an
 * existing mapping, report the mmap pseudo-offset and page count that
 * produced it.  The vaddr must be exactly the vma start, not merely
 * inside it; otherwise -EINVAL.
 */
static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct grant_map *map;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	spin_lock(&priv->lock);
	map = gntdev_find_map_vaddr(priv, op.vaddr);
	if (map == NULL ||
	    map->vma->vm_start != op.vaddr) {
		spin_unlock(&priv->lock);
		return -EINVAL;
	}
	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	spin_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return 0;
}
520
/*
 * IOCTL_GNTDEV_SET_MAX_GRANTS: adjust this instance's grant limit; the
 * module-wide 'limit' parameter is the hard ceiling (-E2BIG above it).
 * NOTE(review): lowering the limit below priv->used is not rejected --
 * existing maps remain; only future allocations are constrained.
 */
static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
					struct ioctl_gntdev_set_max_grants __user *u)
{
	struct ioctl_gntdev_set_max_grants op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, limit %d\n", priv, op.count);
	if (op.count > limit)
		return -E2BIG;

	spin_lock(&priv->lock);
	priv->limit = op.count;
	spin_unlock(&priv->lock);
	return 0;
}
537
/* Dispatch gntdev ioctls.  Unknown commands return -ENOIOCTLCMD. */
static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_MAX_GRANTS:
		return gntdev_ioctl_set_max_grants(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}
564
/*
 * mmap a previously registered grant range.  vm_pgoff selects the map
 * (the pseudo-offset returned by IOCTL_GNTDEV_MAP_GRANT_REF) and the
 * vma must cover exactly its page count.  The ptes are populated by a
 * grant-map hypercall targeted at the pte machine addresses, so no
 * faults are expected afterwards.
 *
 * NOTE(review): the error returns after the lock is dropped leave
 * map->vma set and the range possibly partially mapped -- confirm
 * whether cleanup is expected here or deferred to vma close/notifier.
 */
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int err = -EINVAL;

	/* writable mappings must be shared: grants cannot be COW'ed */
	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (map->vma)	/* a range can be mmap'ed only once at a time */
		goto unlock_out;
	if (priv->mm != vma->vm_mm) {
		printk(KERN_WARNING "Huh? Other mm?\n");
		goto unlock_out;
	}

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;

	vma->vm_private_data = map;
	map->vma = vma;

	map->flags = GNTMAP_host_map | GNTMAP_application_map;
	if (!(vma->vm_flags & VM_WRITE))
		map->flags |= GNTMAP_readonly;

	/* the hypercalls below may sleep; drop the spinlock first */
	spin_unlock(&priv->lock);

	/* fill map_ops/unmap_ops with the machine address of each pte */
	err = apply_to_page_range(vma->vm_mm, vma->vm_start,
				  vma->vm_end - vma->vm_start,
				  find_grant_ptes, map);
	if (err) {
		printk(KERN_WARNING "find_grant_ptes() failure.\n");
		return err;
	}

	err = map_grant_pages(map);
	if (err) {
		printk(KERN_WARNING "map_grant_pages() failure.\n");
		return err;
	}

	map->is_mapped = 1;

	return 0;

unlock_out:
	spin_unlock(&priv->lock);
	return err;
}
625
/* File operations for the gntdev character device. */
static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

/* Registered as /dev/xen/gntdev with a dynamically assigned minor. */
static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};
639
640/* ------------------------------------------------------------------ */
641
642static int __init gntdev_init(void)
643{
644 int err;
645
646 if (!xen_domain())
647 return -ENODEV;
648
649 err = misc_register(&gntdev_miscdev);
650 if (err != 0) {
651 printk(KERN_ERR "Could not register gntdev device\n");
652 return err;
653 }
654 return 0;
655}
656
/* Module exit: unregister the misc device. */
static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}
661
662module_init(gntdev_init);
663module_exit(gntdev_exit);
664
665/* ------------------------------------------------------------------ */
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 6c4531816496..9ef54ebc1194 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -447,6 +447,52 @@ unsigned int gnttab_max_grant_frames(void)
447} 447}
448EXPORT_SYMBOL_GPL(gnttab_max_grant_frames); 448EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
449 449
/*
 * Perform a batched GNTTABOP_map_grant_ref hypercall and, for
 * GNTMAP_contains_pte ops, register an m2p override so the mapped
 * machine frame translates back to the matching local page.  Returns
 * the hypercall result, or the first m2p_add_override() error.
 * NOTE(review): a mid-loop override failure does not roll back the
 * earlier overrides or the hypercall -- confirm callers cope.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* m2p override only supported for GNTMAP_contains_pte mappings */
		if (!(map_ops[i].flags & GNTMAP_contains_pte))
			continue;
		/* host_addr is the machine address of the pte; turn it back
		 * into a kernel virtual address to read the installed pte */
		pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
		mfn = pte_mfn(*pte);
		ret = m2p_add_override(mfn, pages[i]);
		if (ret)
			return ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
476
/*
 * Perform a batched GNTTABOP_unmap_grant_ref hypercall and drop the
 * m2p override of each page.  Returns the hypercall result, or the
 * first m2p_remove_override() error.
 * NOTE(review): overrides are removed unconditionally, while
 * gnttab_map_refs() adds them only for GNTMAP_contains_pte ops --
 * confirm all callers map with that flag.
 */
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		ret = m2p_remove_override(pages[i]);
		if (ret)
			return ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
495
450static int gnttab_map(unsigned int start_idx, unsigned int end_idx) 496static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
451{ 497{
452 struct gnttab_setup_table setup; 498 struct gnttab_setup_table setup;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index c01b5ddce529..afbe041f42c5 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -105,7 +105,7 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
105 const struct pci_device_id *ent) 105 const struct pci_device_id *ent)
106{ 106{
107 int i, ret; 107 int i, ret;
108 long ioaddr, iolen; 108 long ioaddr;
109 long mmio_addr, mmio_len; 109 long mmio_addr, mmio_len;
110 unsigned int max_nr_gframes; 110 unsigned int max_nr_gframes;
111 111
@@ -114,7 +114,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
114 return i; 114 return i;
115 115
116 ioaddr = pci_resource_start(pdev, 0); 116 ioaddr = pci_resource_start(pdev, 0);
117 iolen = pci_resource_len(pdev, 0);
118 117
119 mmio_addr = pci_resource_start(pdev, 1); 118 mmio_addr = pci_resource_start(pdev, 1);
120 mmio_len = pci_resource_len(pdev, 1); 119 mmio_len = pci_resource_len(pdev, 1);
@@ -125,19 +124,13 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
125 goto pci_out; 124 goto pci_out;
126 } 125 }
127 126
128 if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { 127 ret = pci_request_region(pdev, 1, DRV_NAME);
129 dev_err(&pdev->dev, "MEM I/O resource 0x%lx @ 0x%lx busy\n", 128 if (ret < 0)
130 mmio_addr, mmio_len);
131 ret = -EBUSY;
132 goto pci_out; 129 goto pci_out;
133 }
134 130
135 if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { 131 ret = pci_request_region(pdev, 0, DRV_NAME);
136 dev_err(&pdev->dev, "I/O resource 0x%lx @ 0x%lx busy\n", 132 if (ret < 0)
137 iolen, ioaddr);
138 ret = -EBUSY;
139 goto mem_out; 133 goto mem_out;
140 }
141 134
142 platform_mmio = mmio_addr; 135 platform_mmio = mmio_addr;
143 platform_mmiolen = mmio_len; 136 platform_mmiolen = mmio_len;
@@ -169,9 +162,9 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
169 return 0; 162 return 0;
170 163
171out: 164out:
172 release_region(ioaddr, iolen); 165 pci_release_region(pdev, 0);
173mem_out: 166mem_out:
174 release_mem_region(mmio_addr, mmio_len); 167 pci_release_region(pdev, 1);
175pci_out: 168pci_out:
176 pci_disable_device(pdev); 169 pci_disable_device(pdev);
177 return ret; 170 return ret;
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 1c1236087f78..bbd000f88af7 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -122,6 +122,7 @@ static ssize_t xenbus_file_read(struct file *filp,
122 int ret; 122 int ret;
123 123
124 mutex_lock(&u->reply_mutex); 124 mutex_lock(&u->reply_mutex);
125again:
125 while (list_empty(&u->read_buffers)) { 126 while (list_empty(&u->read_buffers)) {
126 mutex_unlock(&u->reply_mutex); 127 mutex_unlock(&u->reply_mutex);
127 if (filp->f_flags & O_NONBLOCK) 128 if (filp->f_flags & O_NONBLOCK)
@@ -144,7 +145,7 @@ static ssize_t xenbus_file_read(struct file *filp,
144 i += sz - ret; 145 i += sz - ret;
145 rb->cons += sz - ret; 146 rb->cons += sz - ret;
146 147
147 if (ret != sz) { 148 if (ret != 0) {
148 if (i == 0) 149 if (i == 0)
149 i = -EFAULT; 150 i = -EFAULT;
150 goto out; 151 goto out;
@@ -160,6 +161,8 @@ static ssize_t xenbus_file_read(struct file *filp,
160 struct read_buffer, list); 161 struct read_buffer, list);
161 } 162 }
162 } 163 }
164 if (i == 0)
165 goto again;
163 166
164out: 167out:
165 mutex_unlock(&u->reply_mutex); 168 mutex_unlock(&u->reply_mutex);
@@ -407,6 +410,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
407 410
408 mutex_lock(&u->reply_mutex); 411 mutex_lock(&u->reply_mutex);
409 rc = queue_reply(&u->read_buffers, &reply, sizeof(reply)); 412 rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
413 wake_up(&u->read_waitq);
410 mutex_unlock(&u->reply_mutex); 414 mutex_unlock(&u->reply_mutex);
411 } 415 }
412 416
@@ -455,7 +459,7 @@ static ssize_t xenbus_file_write(struct file *filp,
455 459
456 ret = copy_from_user(u->u.buffer + u->len, ubuf, len); 460 ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
457 461
458 if (ret == len) { 462 if (ret != 0) {
459 rc = -EFAULT; 463 rc = -EFAULT;
460 goto out; 464 goto out;
461 } 465 }
@@ -488,21 +492,6 @@ static ssize_t xenbus_file_write(struct file *filp,
488 msg_type = u->u.msg.type; 492 msg_type = u->u.msg.type;
489 493
490 switch (msg_type) { 494 switch (msg_type) {
491 case XS_TRANSACTION_START:
492 case XS_TRANSACTION_END:
493 case XS_DIRECTORY:
494 case XS_READ:
495 case XS_GET_PERMS:
496 case XS_RELEASE:
497 case XS_GET_DOMAIN_PATH:
498 case XS_WRITE:
499 case XS_MKDIR:
500 case XS_RM:
501 case XS_SET_PERMS:
502 /* Send out a transaction */
503 ret = xenbus_write_transaction(msg_type, u);
504 break;
505
506 case XS_WATCH: 495 case XS_WATCH:
507 case XS_UNWATCH: 496 case XS_UNWATCH:
508 /* (Un)Ask for some path to be watched for changes */ 497 /* (Un)Ask for some path to be watched for changes */
@@ -510,7 +499,8 @@ static ssize_t xenbus_file_write(struct file *filp,
510 break; 499 break;
511 500
512 default: 501 default:
513 ret = -EINVAL; 502 /* Send out a transaction */
503 ret = xenbus_write_transaction(msg_type, u);
514 break; 504 break;
515 } 505 }
516 if (ret != 0) 506 if (ret != 0)
@@ -555,6 +545,7 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
555 struct xenbus_file_priv *u = filp->private_data; 545 struct xenbus_file_priv *u = filp->private_data;
556 struct xenbus_transaction_holder *trans, *tmp; 546 struct xenbus_transaction_holder *trans, *tmp;
557 struct watch_adapter *watch, *tmp_watch; 547 struct watch_adapter *watch, *tmp_watch;
548 struct read_buffer *rb, *tmp_rb;
558 549
559 /* 550 /*
560 * No need for locking here because there are no other users, 551 * No need for locking here because there are no other users,
@@ -573,6 +564,10 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
573 free_watch_adapter(watch); 564 free_watch_adapter(watch);
574 } 565 }
575 566
567 list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
568 list_del(&rb->list);
569 kfree(rb);
570 }
576 kfree(u); 571 kfree(u);
577 572
578 return 0; 573 return 0;